From 15a3b9743b43cdeeb845c7ab2ba3e0c7108bde2b Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Tue, 24 Feb 2026 11:58:47 +0800 Subject: [PATCH 001/120] =?UTF-8?q?fix(handler):=20=E5=8A=A0=E5=9B=BA?= =?UTF-8?q?=E7=BD=91=E5=85=B3=E5=BC=82=E5=B8=B8=E5=85=9C=E5=BA=95=E5=B9=B6?= =?UTF-8?q?=E8=A1=A5=E5=85=85=E8=AF=A6=E7=BB=86=E5=8D=95=E5=85=83=E6=B5=8B?= =?UTF-8?q?=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 为 OpenAI Responses 增加依赖自检与 panic recover 兜底,避免异常击穿请求链路。 为 Gateway/OpenAI/Sora 的使用量任务同步回退路径增加 panic 防护,并验证 panic 后后续任务仍可执行。 补充依赖检查与 recover 相关边界用例,覆盖不覆盖已写响应、无 panic 不写响应等行为。 Co-Authored-By: Claude Opus 4.6 --- backend/internal/handler/gateway_handler.go | 8 + .../handler/openai_gateway_handler.go | 80 +++++++- .../handler/openai_gateway_handler_test.go | 191 ++++++++++++++++++ .../internal/handler/sora_gateway_handler.go | 8 + .../handler/usage_record_submit_task_test.go | 48 +++++ 5 files changed, 332 insertions(+), 3 deletions(-) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 4b32969fe..028e4e118 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -1528,5 +1528,13 @@ func (h *GatewayHandler) submitUsageRecordTask(task service.UsageRecordTask) { // 回退路径:worker 池未注入时同步执行,避免退回到无界 goroutine 模式。 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() + defer func() { + if recovered := recover(); recovered != nil { + logger.L().With( + zap.String("component", "handler.gateway.messages"), + zap.Any("panic", recovered), + ).Error("gateway.usage_record_task_panic_recovered") + } + }() task(ctx) } diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 50af684d0..a1c240c77 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -7,6 +7,7 
@@ import ( "fmt" "io" "net/http" + "runtime/debug" "strings" "time" @@ -64,6 +65,10 @@ func NewOpenAIGatewayHandler( // Responses handles OpenAI Responses API endpoint // POST /openai/v1/responses func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { + // 局部兜底:确保该 handler 内部任何 panic 都不会击穿到进程级。 + streamStarted := false + defer h.recoverResponsesPanic(c, &streamStarted) + requestStart := time.Now() // Get apiKey and user from context (set by ApiKeyAuth middleware) @@ -85,6 +90,9 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { zap.Int64("api_key_id", apiKey.ID), zap.Any("group_id", apiKey.GroupID), ) + if !h.ensureResponsesDependencies(c, reqLog) { + return + } // Read request body body, err := io.ReadAll(c.Request.Body) @@ -159,9 +167,6 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { } } - // Track if we've started streaming (for error handling) - streamStarted := false - // 绑定错误透传服务,允许 service 层在非 failover 错误场景复用规则。 if h.errorPassthroughService != nil { service.BindErrorPassthroughService(c, h.errorPassthroughService) @@ -411,6 +416,67 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { } } +func (h *OpenAIGatewayHandler) recoverResponsesPanic(c *gin.Context, streamStarted *bool) { + recovered := recover() + if recovered == nil { + return + } + + started := false + if streamStarted != nil { + started = *streamStarted + } + wroteFallback := h.ensureForwardErrorResponse(c, started) + requestLogger(c, "handler.openai_gateway.responses").Error( + "openai.responses_panic_recovered", + zap.Bool("fallback_error_response_written", wroteFallback), + zap.Any("panic", recovered), + zap.ByteString("stack", debug.Stack()), + ) +} + +func (h *OpenAIGatewayHandler) ensureResponsesDependencies(c *gin.Context, reqLog *zap.Logger) bool { + missing := h.missingResponsesDependencies() + if len(missing) == 0 { + return true + } + + if reqLog == nil { + reqLog = requestLogger(c, "handler.openai_gateway.responses") + } + 
reqLog.Error("openai.handler_dependencies_missing", zap.Strings("missing_dependencies", missing)) + + if c != nil && c.Writer != nil && !c.Writer.Written() { + c.JSON(http.StatusServiceUnavailable, gin.H{ + "error": gin.H{ + "type": "api_error", + "message": "Service temporarily unavailable", + }, + }) + } + return false +} + +func (h *OpenAIGatewayHandler) missingResponsesDependencies() []string { + missing := make([]string, 0, 5) + if h == nil { + return append(missing, "handler") + } + if h.gatewayService == nil { + missing = append(missing, "gatewayService") + } + if h.billingCacheService == nil { + missing = append(missing, "billingCacheService") + } + if h.apiKeyService == nil { + missing = append(missing, "apiKeyService") + } + if h.concurrencyHelper == nil || h.concurrencyHelper.concurrencyService == nil { + missing = append(missing, "concurrencyHelper") + } + return missing +} + func getContextInt64(c *gin.Context, key string) (int64, bool) { if c == nil || key == "" { return 0, false @@ -444,6 +510,14 @@ func (h *OpenAIGatewayHandler) submitUsageRecordTask(task service.UsageRecordTas // 回退路径:worker 池未注入时同步执行,避免退回到无界 goroutine 模式。 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() + defer func() { + if recovered := recover(); recovered != nil { + logger.L().With( + zap.String("component", "handler.openai_gateway.responses"), + zap.Any("panic", recovered), + ).Error("openai.usage_record_task_panic_recovered") + } + }() task(ctx) } diff --git a/backend/internal/handler/openai_gateway_handler_test.go b/backend/internal/handler/openai_gateway_handler_test.go index 1ca52c2d9..a80867c4a 100644 --- a/backend/internal/handler/openai_gateway_handler_test.go +++ b/backend/internal/handler/openai_gateway_handler_test.go @@ -7,6 +7,8 @@ import ( "strings" "testing" + "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" "github.com/gin-gonic/gin" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -141,6 +143,195 @@ func TestOpenAIEnsureForwardErrorResponse_DoesNotOverrideWrittenResponse(t *test assert.Equal(t, "already written", w.Body.String()) } +func TestOpenAIRecoverResponsesPanic_WritesFallbackResponse(t *testing.T) { + gin.SetMode(gin.TestMode) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil) + + h := &OpenAIGatewayHandler{} + streamStarted := false + require.NotPanics(t, func() { + func() { + defer h.recoverResponsesPanic(c, &streamStarted) + panic("test panic") + }() + }) + + require.Equal(t, http.StatusBadGateway, w.Code) + + var parsed map[string]any + err := json.Unmarshal(w.Body.Bytes(), &parsed) + require.NoError(t, err) + + errorObj, ok := parsed["error"].(map[string]any) + require.True(t, ok) + assert.Equal(t, "upstream_error", errorObj["type"]) + assert.Equal(t, "Upstream request failed", errorObj["message"]) +} + +func TestOpenAIRecoverResponsesPanic_NoPanicNoWrite(t *testing.T) { + gin.SetMode(gin.TestMode) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil) + + h := &OpenAIGatewayHandler{} + streamStarted := false + require.NotPanics(t, func() { + func() { + defer h.recoverResponsesPanic(c, &streamStarted) + }() + }) + + require.False(t, c.Writer.Written()) + assert.Equal(t, "", w.Body.String()) +} + +func TestOpenAIRecoverResponsesPanic_DoesNotOverrideWrittenResponse(t *testing.T) { + gin.SetMode(gin.TestMode) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil) + c.String(http.StatusTeapot, "already written") + + h := &OpenAIGatewayHandler{} + streamStarted := false + require.NotPanics(t, func() { + func() { + defer h.recoverResponsesPanic(c, &streamStarted) + panic("test panic") + }() + }) + + 
require.Equal(t, http.StatusTeapot, w.Code) + assert.Equal(t, "already written", w.Body.String()) +} + +func TestOpenAIMissingResponsesDependencies(t *testing.T) { + t.Run("nil_handler", func(t *testing.T) { + var h *OpenAIGatewayHandler + require.Equal(t, []string{"handler"}, h.missingResponsesDependencies()) + }) + + t.Run("all_dependencies_missing", func(t *testing.T) { + h := &OpenAIGatewayHandler{} + require.Equal(t, + []string{"gatewayService", "billingCacheService", "apiKeyService", "concurrencyHelper"}, + h.missingResponsesDependencies(), + ) + }) + + t.Run("all_dependencies_present", func(t *testing.T) { + h := &OpenAIGatewayHandler{ + gatewayService: &service.OpenAIGatewayService{}, + billingCacheService: &service.BillingCacheService{}, + apiKeyService: &service.APIKeyService{}, + concurrencyHelper: &ConcurrencyHelper{ + concurrencyService: &service.ConcurrencyService{}, + }, + } + require.Empty(t, h.missingResponsesDependencies()) + }) +} + +func TestOpenAIEnsureResponsesDependencies(t *testing.T) { + t.Run("missing_dependencies_returns_503", func(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil) + + h := &OpenAIGatewayHandler{} + ok := h.ensureResponsesDependencies(c, nil) + + require.False(t, ok) + require.Equal(t, http.StatusServiceUnavailable, w.Code) + var parsed map[string]any + err := json.Unmarshal(w.Body.Bytes(), &parsed) + require.NoError(t, err) + errorObj, exists := parsed["error"].(map[string]any) + require.True(t, exists) + assert.Equal(t, "api_error", errorObj["type"]) + assert.Equal(t, "Service temporarily unavailable", errorObj["message"]) + }) + + t.Run("already_written_response_not_overridden", func(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil) + 
c.String(http.StatusTeapot, "already written") + + h := &OpenAIGatewayHandler{} + ok := h.ensureResponsesDependencies(c, nil) + + require.False(t, ok) + require.Equal(t, http.StatusTeapot, w.Code) + assert.Equal(t, "already written", w.Body.String()) + }) + + t.Run("dependencies_ready_returns_true_and_no_write", func(t *testing.T) { + gin.SetMode(gin.TestMode) + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", nil) + + h := &OpenAIGatewayHandler{ + gatewayService: &service.OpenAIGatewayService{}, + billingCacheService: &service.BillingCacheService{}, + apiKeyService: &service.APIKeyService{}, + concurrencyHelper: &ConcurrencyHelper{ + concurrencyService: &service.ConcurrencyService{}, + }, + } + ok := h.ensureResponsesDependencies(c, nil) + + require.True(t, ok) + require.False(t, c.Writer.Written()) + assert.Equal(t, "", w.Body.String()) + }) +} + +func TestOpenAIResponses_MissingDependencies_ReturnsServiceUnavailable(t *testing.T) { + gin.SetMode(gin.TestMode) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(`{"model":"gpt-5","stream":false}`)) + c.Request.Header.Set("Content-Type", "application/json") + + groupID := int64(2) + c.Set(string(middleware.ContextKeyAPIKey), &service.APIKey{ + ID: 10, + GroupID: &groupID, + }) + c.Set(string(middleware.ContextKeyUser), middleware.AuthSubject{ + UserID: 1, + Concurrency: 1, + }) + + // 故意使用未初始化依赖,验证快速失败而不是崩溃。 + h := &OpenAIGatewayHandler{} + require.NotPanics(t, func() { + h.Responses(c) + }) + + require.Equal(t, http.StatusServiceUnavailable, w.Code) + + var parsed map[string]any + err := json.Unmarshal(w.Body.Bytes(), &parsed) + require.NoError(t, err) + + errorObj, ok := parsed["error"].(map[string]any) + require.True(t, ok) + assert.Equal(t, "api_error", errorObj["type"]) + assert.Equal(t, "Service temporarily unavailable", 
errorObj["message"]) +} + // TestOpenAIHandler_GjsonExtraction 验证 gjson 从请求体中提取 model/stream 的正确性 func TestOpenAIHandler_GjsonExtraction(t *testing.T) { tests := []struct { diff --git a/backend/internal/handler/sora_gateway_handler.go b/backend/internal/handler/sora_gateway_handler.go index ab3a3f14f..ccec0a812 100644 --- a/backend/internal/handler/sora_gateway_handler.go +++ b/backend/internal/handler/sora_gateway_handler.go @@ -461,6 +461,14 @@ func (h *SoraGatewayHandler) submitUsageRecordTask(task service.UsageRecordTask) // 回退路径:worker 池未注入时同步执行,避免退回到无界 goroutine 模式。 ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() + defer func() { + if recovered := recover(); recovered != nil { + logger.L().With( + zap.String("component", "handler.sora_gateway.chat_completions"), + zap.Any("panic", recovered), + ).Error("sora.usage_record_task_panic_recovered") + } + }() task(ctx) } diff --git a/backend/internal/handler/usage_record_submit_task_test.go b/backend/internal/handler/usage_record_submit_task_test.go index df759f44f..c7c48e14b 100644 --- a/backend/internal/handler/usage_record_submit_task_test.go +++ b/backend/internal/handler/usage_record_submit_task_test.go @@ -61,6 +61,22 @@ func TestGatewayHandlerSubmitUsageRecordTask_NilTask(t *testing.T) { }) } +func TestGatewayHandlerSubmitUsageRecordTask_WithoutPool_TaskPanicRecovered(t *testing.T) { + h := &GatewayHandler{} + var called atomic.Bool + + require.NotPanics(t, func() { + h.submitUsageRecordTask(func(ctx context.Context) { + panic("usage task panic") + }) + }) + + h.submitUsageRecordTask(func(ctx context.Context) { + called.Store(true) + }) + require.True(t, called.Load(), "panic 后后续任务应仍可执行") +} + func TestOpenAIGatewayHandlerSubmitUsageRecordTask_WithPool(t *testing.T) { pool := newUsageRecordTestPool(t) h := &OpenAIGatewayHandler{usageRecordWorkerPool: pool} @@ -98,6 +114,22 @@ func TestOpenAIGatewayHandlerSubmitUsageRecordTask_NilTask(t *testing.T) { }) } +func 
TestOpenAIGatewayHandlerSubmitUsageRecordTask_WithoutPool_TaskPanicRecovered(t *testing.T) { + h := &OpenAIGatewayHandler{} + var called atomic.Bool + + require.NotPanics(t, func() { + h.submitUsageRecordTask(func(ctx context.Context) { + panic("usage task panic") + }) + }) + + h.submitUsageRecordTask(func(ctx context.Context) { + called.Store(true) + }) + require.True(t, called.Load(), "panic 后后续任务应仍可执行") +} + func TestSoraGatewayHandlerSubmitUsageRecordTask_WithPool(t *testing.T) { pool := newUsageRecordTestPool(t) h := &SoraGatewayHandler{usageRecordWorkerPool: pool} @@ -134,3 +166,19 @@ func TestSoraGatewayHandlerSubmitUsageRecordTask_NilTask(t *testing.T) { h.submitUsageRecordTask(nil) }) } + +func TestSoraGatewayHandlerSubmitUsageRecordTask_WithoutPool_TaskPanicRecovered(t *testing.T) { + h := &SoraGatewayHandler{} + var called atomic.Bool + + require.NotPanics(t, func() { + h.submitUsageRecordTask(func(ctx context.Context) { + panic("usage task panic") + }) + }) + + h.submitUsageRecordTask(func(ctx context.Context) { + called.Store(true) + }) + require.True(t, called.Load(), "panic 后后续任务应仍可执行") +} From e0c4384a33dc54d35014484935d74fba2b28d0e0 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Tue, 24 Feb 2026 16:51:45 +0800 Subject: [PATCH 002/120] =?UTF-8?q?feat(data-management):=20=E5=AE=9E?= =?UTF-8?q?=E7=8E=B0=20backupd=20=E6=95=B0=E6=8D=AE=E7=AE=A1=E7=90=86?= =?UTF-8?q?=E4=B8=8E=E5=A4=87=E4=BB=BD=E6=89=A7=E8=A1=8C=E9=93=BE=E8=B7=AF?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Makefile | 9 +- README_CN.md | 12 + backend/cmd/server/wire_gen.go | 4 +- backend/go.mod | 8 +- backend/go.sum | 16 +- .../backup/proto/backup/v1/backup.pb.go | 1479 +++++ .../backup/proto/backup/v1/backup.proto | 139 + .../backup/proto/backup/v1/backup_grpc.pb.go | 349 ++ .../handler/admin/data_management_handler.go | 225 + .../admin/data_management_handler_test.go | 78 + backend/internal/handler/handler.go | 1 + 
backend/internal/handler/wire.go | 3 + backend/internal/server/routes/admin.go | 16 + .../internal/service/data_management_grpc.go | 533 ++ .../service/data_management_grpc_test.go | 122 + .../service/data_management_service.go | 123 + .../service/data_management_service_test.go | 91 + backend/internal/service/wire.go | 1 + backup/README.md | 49 + backup/cmd/backupd/main.go | 114 + backup/ent/backupjob.go | 319 ++ backup/ent/backupjob/backupjob.go | 275 + backup/ent/backupjob/where.go | 1104 ++++ backup/ent/backupjob_create.go | 550 ++ backup/ent/backupjob_delete.go | 88 + backup/ent/backupjob_query.go | 606 ++ backup/ent/backupjob_update.go | 1121 ++++ backup/ent/backupjobevent.go | 201 + backup/ent/backupjobevent/backupjobevent.go | 158 + backup/ent/backupjobevent/where.go | 449 ++ backup/ent/backupjobevent_create.go | 354 ++ backup/ent/backupjobevent_delete.go | 88 + backup/ent/backupjobevent_query.go | 606 ++ backup/ent/backupjobevent_update.go | 517 ++ backup/ent/backups3config.go | 217 + backup/ent/backups3config/backups3config.go | 154 + backup/ent/backups3config/where.go | 635 +++ backup/ent/backups3config_create.go | 445 ++ backup/ent/backups3config_delete.go | 88 + backup/ent/backups3config_query.go | 527 ++ backup/ent/backups3config_update.go | 536 ++ backup/ent/backupsetting.go | 172 + backup/ent/backupsetting/backupsetting.go | 141 + backup/ent/backupsetting/where.go | 410 ++ backup/ent/backupsetting_create.go | 357 ++ backup/ent/backupsetting_delete.go | 88 + backup/ent/backupsetting_query.go | 527 ++ backup/ent/backupsetting_update.go | 448 ++ backup/ent/backupsourceconfig.go | 232 + .../backupsourceconfig/backupsourceconfig.go | 172 + backup/ent/backupsourceconfig/where.go | 840 +++ backup/ent/backupsourceconfig_create.go | 414 ++ backup/ent/backupsourceconfig_delete.go | 88 + backup/ent/backupsourceconfig_query.go | 527 ++ backup/ent/backupsourceconfig_update.go | 762 +++ backup/ent/client.go | 947 ++++ backup/ent/ent.go | 616 ++ 
backup/ent/enttest/enttest.go | 84 + backup/ent/generate.go | 3 + backup/ent/hook/hook.go | 247 + backup/ent/migrate/migrate.go | 64 + backup/ent/migrate/schema.go | 166 + backup/ent/mutation.go | 5044 +++++++++++++++++ backup/ent/predicate/predicate.go | 22 + backup/ent/runtime.go | 142 + backup/ent/runtime/runtime.go | 10 + backup/ent/schema/backup_job.go | 50 + backup/ent/schema/backup_job_event.go | 38 + backup/ent/schema/backup_s3_config.go | 28 + backup/ent/schema/backup_setting.go | 24 + backup/ent/schema/backup_source_config.go | 36 + backup/ent/tx.go | 222 + backup/go.mod | 62 + backup/go.sum | 174 + backup/internal/artifact/doc.go | 1 + backup/internal/config/doc.go | 1 + backup/internal/executor/doc.go | 1 + backup/internal/executor/runner.go | 755 +++ backup/internal/executor/runner_test.go | 110 + backup/internal/grpcserver/doc.go | 1 + backup/internal/grpcserver/interceptor.go | 131 + .../internal/grpcserver/interceptor_test.go | 50 + backup/internal/grpcserver/server.go | 342 ++ backup/internal/s3client/client.go | 142 + backup/internal/s3client/doc.go | 1 + backup/internal/store/entstore/doc.go | 1 + backup/internal/store/entstore/store.go | 729 +++ backup/internal/store/entstore/store_test.go | 153 + backup/proto/backup/v1/backup.pb.go | 1479 +++++ backup/proto/backup/v1/backup.proto | 139 + backup/proto/backup/v1/backup_grpc.pb.go | 349 ++ deploy/BACKUPD_CN.md | 78 + deploy/README.md | 11 + deploy/docker-compose.override.yml.example | 13 + deploy/install-backupd.sh | 123 + deploy/sub2api-backupd.service | 22 + frontend/src/api/admin/dataManagement.ts | 181 + frontend/src/api/admin/index.ts | 8 +- frontend/src/components/layout/AppSidebar.vue | 34 +- frontend/src/i18n/locales/en.ts | 112 + frontend/src/i18n/locales/zh.ts | 112 + frontend/src/router/index.ts | 12 + .../src/views/admin/DataManagementView.vue | 580 ++ 103 files changed, 31218 insertions(+), 20 deletions(-) create mode 100644 backend/internal/backup/proto/backup/v1/backup.pb.go create 
mode 100644 backend/internal/backup/proto/backup/v1/backup.proto create mode 100644 backend/internal/backup/proto/backup/v1/backup_grpc.pb.go create mode 100644 backend/internal/handler/admin/data_management_handler.go create mode 100644 backend/internal/handler/admin/data_management_handler_test.go create mode 100644 backend/internal/service/data_management_grpc.go create mode 100644 backend/internal/service/data_management_grpc_test.go create mode 100644 backend/internal/service/data_management_service.go create mode 100644 backend/internal/service/data_management_service_test.go create mode 100644 backup/README.md create mode 100644 backup/cmd/backupd/main.go create mode 100644 backup/ent/backupjob.go create mode 100644 backup/ent/backupjob/backupjob.go create mode 100644 backup/ent/backupjob/where.go create mode 100644 backup/ent/backupjob_create.go create mode 100644 backup/ent/backupjob_delete.go create mode 100644 backup/ent/backupjob_query.go create mode 100644 backup/ent/backupjob_update.go create mode 100644 backup/ent/backupjobevent.go create mode 100644 backup/ent/backupjobevent/backupjobevent.go create mode 100644 backup/ent/backupjobevent/where.go create mode 100644 backup/ent/backupjobevent_create.go create mode 100644 backup/ent/backupjobevent_delete.go create mode 100644 backup/ent/backupjobevent_query.go create mode 100644 backup/ent/backupjobevent_update.go create mode 100644 backup/ent/backups3config.go create mode 100644 backup/ent/backups3config/backups3config.go create mode 100644 backup/ent/backups3config/where.go create mode 100644 backup/ent/backups3config_create.go create mode 100644 backup/ent/backups3config_delete.go create mode 100644 backup/ent/backups3config_query.go create mode 100644 backup/ent/backups3config_update.go create mode 100644 backup/ent/backupsetting.go create mode 100644 backup/ent/backupsetting/backupsetting.go create mode 100644 backup/ent/backupsetting/where.go create mode 100644 backup/ent/backupsetting_create.go 
create mode 100644 backup/ent/backupsetting_delete.go create mode 100644 backup/ent/backupsetting_query.go create mode 100644 backup/ent/backupsetting_update.go create mode 100644 backup/ent/backupsourceconfig.go create mode 100644 backup/ent/backupsourceconfig/backupsourceconfig.go create mode 100644 backup/ent/backupsourceconfig/where.go create mode 100644 backup/ent/backupsourceconfig_create.go create mode 100644 backup/ent/backupsourceconfig_delete.go create mode 100644 backup/ent/backupsourceconfig_query.go create mode 100644 backup/ent/backupsourceconfig_update.go create mode 100644 backup/ent/client.go create mode 100644 backup/ent/ent.go create mode 100644 backup/ent/enttest/enttest.go create mode 100644 backup/ent/generate.go create mode 100644 backup/ent/hook/hook.go create mode 100644 backup/ent/migrate/migrate.go create mode 100644 backup/ent/migrate/schema.go create mode 100644 backup/ent/mutation.go create mode 100644 backup/ent/predicate/predicate.go create mode 100644 backup/ent/runtime.go create mode 100644 backup/ent/runtime/runtime.go create mode 100644 backup/ent/schema/backup_job.go create mode 100644 backup/ent/schema/backup_job_event.go create mode 100644 backup/ent/schema/backup_s3_config.go create mode 100644 backup/ent/schema/backup_setting.go create mode 100644 backup/ent/schema/backup_source_config.go create mode 100644 backup/ent/tx.go create mode 100644 backup/go.mod create mode 100644 backup/go.sum create mode 100644 backup/internal/artifact/doc.go create mode 100644 backup/internal/config/doc.go create mode 100644 backup/internal/executor/doc.go create mode 100644 backup/internal/executor/runner.go create mode 100644 backup/internal/executor/runner_test.go create mode 100644 backup/internal/grpcserver/doc.go create mode 100644 backup/internal/grpcserver/interceptor.go create mode 100644 backup/internal/grpcserver/interceptor_test.go create mode 100644 backup/internal/grpcserver/server.go create mode 100644 
backup/internal/s3client/client.go create mode 100644 backup/internal/s3client/doc.go create mode 100644 backup/internal/store/entstore/doc.go create mode 100644 backup/internal/store/entstore/store.go create mode 100644 backup/internal/store/entstore/store_test.go create mode 100644 backup/proto/backup/v1/backup.pb.go create mode 100644 backup/proto/backup/v1/backup.proto create mode 100644 backup/proto/backup/v1/backup_grpc.pb.go create mode 100644 deploy/BACKUPD_CN.md create mode 100755 deploy/install-backupd.sh create mode 100644 deploy/sub2api-backupd.service create mode 100644 frontend/src/api/admin/dataManagement.ts create mode 100644 frontend/src/views/admin/DataManagementView.vue diff --git a/Makefile b/Makefile index b97404ebd..99d520168 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: build build-backend build-frontend test test-backend test-frontend secret-scan +.PHONY: build build-backend build-frontend build-backupd test test-backend test-frontend test-backupd secret-scan # 一键编译前后端 build: build-backend build-frontend @@ -11,6 +11,10 @@ build-backend: build-frontend: @pnpm --dir frontend run build +# 编译 backupd(宿主机备份进程) +build-backupd: + @cd backup && go build -o backupd ./cmd/backupd + # 运行测试(后端 + 前端) test: test-backend test-frontend @@ -21,5 +25,8 @@ test-frontend: @pnpm --dir frontend run lint:check @pnpm --dir frontend run typecheck +test-backupd: + @cd backup && go test ./... 
+ secret-scan: @python3 tools/secret_scan.py diff --git a/README_CN.md b/README_CN.md index ea35a19d8..40ff3d687 100644 --- a/README_CN.md +++ b/README_CN.md @@ -246,6 +246,18 @@ docker-compose -f docker-compose.local.yml logs -f sub2api **推荐:** 使用 `docker-compose.local.yml`(脚本部署)以便更轻松地管理数据。 +#### 启用“数据管理”功能(backupd) + +如需启用管理后台“数据管理”,需要额外部署宿主机备份进程 `backupd`。 + +关键点: + +- 主进程固定探测:`/tmp/sub2api-backup.sock` +- 只有该 Socket 可连通时,数据管理功能才会开启 +- Docker 场景需将宿主机 Socket 挂载到容器同路径 + +详细部署步骤见:`deploy/BACKUPD_CN.md` + #### 访问 在浏览器中打开 `http://你的服务器IP:8080` diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 7a277112f..098333187 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -138,6 +138,8 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { sessionLimitCache := repository.ProvideSessionLimitCache(redisClient, configConfig) accountHandler := admin.NewAccountHandler(adminService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, rateLimitService, accountUsageService, accountTestService, concurrencyService, crsSyncService, sessionLimitCache, compositeTokenCacheInvalidator) adminAnnouncementHandler := admin.NewAnnouncementHandler(announcementService) + dataManagementService := service.NewDataManagementService() + dataManagementHandler := admin.NewDataManagementHandler(dataManagementService) oAuthHandler := admin.NewOAuthHandler(oAuthService) openAIOAuthHandler := admin.NewOpenAIOAuthHandler(openAIOAuthService, adminService) geminiOAuthHandler := admin.NewGeminiOAuthHandler(geminiOAuthService) @@ -183,7 +185,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { errorPassthroughCache := repository.NewErrorPassthroughCache(redisClient) errorPassthroughService := service.NewErrorPassthroughService(errorPassthroughRepository, errorPassthroughCache) errorPassthroughHandler := admin.NewErrorPassthroughHandler(errorPassthroughService) - 
adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler) + adminHandlers := handler.ProvideAdminHandlers(dashboardHandler, adminUserHandler, groupHandler, accountHandler, adminAnnouncementHandler, dataManagementHandler, oAuthHandler, openAIOAuthHandler, geminiOAuthHandler, antigravityOAuthHandler, proxyHandler, adminRedeemHandler, promoHandler, settingHandler, opsHandler, systemHandler, adminSubscriptionHandler, adminUsageHandler, userAttributeHandler, errorPassthroughHandler) usageRecordWorkerPool := service.NewUsageRecordWorkerPool(configConfig) gatewayHandler := handler.NewGatewayHandler(gatewayService, geminiMessagesCompatService, antigravityGatewayService, userService, concurrencyService, billingCacheService, usageService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, configConfig) openAIGatewayHandler := handler.NewOpenAIGatewayHandler(openAIGatewayService, concurrencyService, billingCacheService, apiKeyService, usageRecordWorkerPool, errorPassthroughService, configConfig) diff --git a/backend/go.mod b/backend/go.mod index 94b6fcbb1..49bf4f154 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -33,6 +33,8 @@ require ( golang.org/x/net v0.49.0 golang.org/x/sync v0.19.0 golang.org/x/term v0.39.0 + google.golang.org/grpc v1.75.1 + google.golang.org/protobuf v1.36.10 gopkg.in/natefinch/lumberjack.v2 v2.2.1 gopkg.in/yaml.v3 v3.0.1 modernc.org/sqlite v1.44.3 @@ -79,7 +81,6 @@ require ( github.com/goccy/go-json v0.10.2 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-querystring v1.1.0 // indirect - github.com/google/subcommands v1.2.0 // indirect 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl/v2 v2.18.1 // indirect @@ -138,7 +139,6 @@ require ( go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.37.0 // indirect go.opentelemetry.io/otel/metric v1.37.0 // indirect - go.opentelemetry.io/otel/sdk v1.37.0 // indirect go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/atomic v1.10.0 // indirect go.uber.org/automaxprocs v1.6.0 // indirect @@ -148,9 +148,7 @@ require ( golang.org/x/mod v0.31.0 // indirect golang.org/x/sys v0.40.0 // indirect golang.org/x/text v0.33.0 // indirect - golang.org/x/tools v0.40.0 // indirect - google.golang.org/grpc v1.75.1 // indirect - google.golang.org/protobuf v1.36.10 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect gopkg.in/ini.v1 v1.67.0 // indirect modernc.org/libc v1.67.6 // indirect modernc.org/mathutil v1.7.1 // indirect diff --git a/backend/go.sum b/backend/go.sum index f044c3a83..2aff32cbe 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -111,6 +111,8 @@ github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU= github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= @@ -120,8 +122,6 @@ github.com/google/go-querystring 
v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= -github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= -github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4= @@ -176,8 +176,6 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.15 h1:UNAjwbU9l54TA3KzvqLGxwWjHmMgBUVhBiTjelZgg3U= -github.com/mattn/go-runewidth v0.0.15/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= @@ -211,8 +209,6 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= -github.com/olekukonko/tablewriter v0.0.5 
h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -242,8 +238,6 @@ github.com/refraction-networking/utls v1.8.1 h1:yNY1kapmQU8JeM1sSw2H2asfTIwWxIkr github.com/refraction-networking/utls v1.8.1/go.mod h1:jkSOEkLqn+S/jtpEHPOsVv/4V4EVnelwbMQl4vCWXAM= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY= -github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs= github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro= github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= @@ -266,8 +260,6 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.18.2 
h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= @@ -336,6 +328,8 @@ go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/Wgbsd go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= @@ -385,6 +379,8 @@ golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/genproto v0.0.0-20231106174013-bbf56f31fb17 h1:wpZ8pe2x1Q3f2KyT5f8oP/fa9rHAKgFPr/HZdNuS+PQ= google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4 h1:8XJ4pajGwOlasW+L13MnEGA8W4115jJySQtVfS2/IBU= google.golang.org/genproto/googleapis/api v0.0.0-20250929231259-57b25ae835d4/go.mod h1:NnuHhy+bxcg30o7FnVAZbXsPHUDQ9qKWAQKCD7VxFtk= diff --git a/backend/internal/backup/proto/backup/v1/backup.pb.go b/backend/internal/backup/proto/backup/v1/backup.pb.go new file mode 100644 index 000000000..180cb669b --- /dev/null +++ 
b/backend/internal/backup/proto/backup/v1/backup.pb.go @@ -0,0 +1,1479 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc v6.32.1 +// source: internal/backup/proto/backup/v1/backup.proto + +package backupv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HealthRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthRequest) Reset() { + *x = HealthRequest{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthRequest) ProtoMessage() {} + +func (x *HealthRequest) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthRequest.ProtoReflect.Descriptor instead. 
+func (*HealthRequest) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{0} +} + +type HealthResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + UptimeSeconds int64 `protobuf:"varint,3,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthResponse) Reset() { + *x = HealthResponse{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthResponse) ProtoMessage() {} + +func (x *HealthResponse) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead. 
+func (*HealthResponse) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{1} +} + +func (x *HealthResponse) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *HealthResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *HealthResponse) GetUptimeSeconds() int64 { + if x != nil { + return x.UptimeSeconds + } + return 0 +} + +type SourceConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` + Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` + Database string `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` + SslMode string `protobuf:"bytes,6,opt,name=ssl_mode,json=sslMode,proto3" json:"ssl_mode,omitempty"` + Addr string `protobuf:"bytes,7,opt,name=addr,proto3" json:"addr,omitempty"` + Username string `protobuf:"bytes,8,opt,name=username,proto3" json:"username,omitempty"` + Db int32 `protobuf:"varint,9,opt,name=db,proto3" json:"db,omitempty"` + ContainerName string `protobuf:"bytes,10,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceConfig) Reset() { + *x = SourceConfig{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceConfig) ProtoMessage() {} + +func (x *SourceConfig) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[2] + if x != nil { + 
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceConfig.ProtoReflect.Descriptor instead. +func (*SourceConfig) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{2} +} + +func (x *SourceConfig) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *SourceConfig) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *SourceConfig) GetUser() string { + if x != nil { + return x.User + } + return "" +} + +func (x *SourceConfig) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +func (x *SourceConfig) GetDatabase() string { + if x != nil { + return x.Database + } + return "" +} + +func (x *SourceConfig) GetSslMode() string { + if x != nil { + return x.SslMode + } + return "" +} + +func (x *SourceConfig) GetAddr() string { + if x != nil { + return x.Addr + } + return "" +} + +func (x *SourceConfig) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *SourceConfig) GetDb() int32 { + if x != nil { + return x.Db + } + return 0 +} + +func (x *SourceConfig) GetContainerName() string { + if x != nil { + return x.ContainerName + } + return "" +} + +type S3Config struct { + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + Endpoint string `protobuf:"bytes,2,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"` + Bucket string `protobuf:"bytes,4,opt,name=bucket,proto3" json:"bucket,omitempty"` + AccessKeyId string `protobuf:"bytes,5,opt,name=access_key_id,json=accessKeyId,proto3" json:"access_key_id,omitempty"` + SecretAccessKey string 
`protobuf:"bytes,6,opt,name=secret_access_key,json=secretAccessKey,proto3" json:"secret_access_key,omitempty"` + Prefix string `protobuf:"bytes,7,opt,name=prefix,proto3" json:"prefix,omitempty"` + ForcePathStyle bool `protobuf:"varint,8,opt,name=force_path_style,json=forcePathStyle,proto3" json:"force_path_style,omitempty"` + UseSsl bool `protobuf:"varint,9,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *S3Config) Reset() { + *x = S3Config{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *S3Config) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S3Config) ProtoMessage() {} + +func (x *S3Config) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S3Config.ProtoReflect.Descriptor instead. 
+func (*S3Config) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{3} +} + +func (x *S3Config) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *S3Config) GetEndpoint() string { + if x != nil { + return x.Endpoint + } + return "" +} + +func (x *S3Config) GetRegion() string { + if x != nil { + return x.Region + } + return "" +} + +func (x *S3Config) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *S3Config) GetAccessKeyId() string { + if x != nil { + return x.AccessKeyId + } + return "" +} + +func (x *S3Config) GetSecretAccessKey() string { + if x != nil { + return x.SecretAccessKey + } + return "" +} + +func (x *S3Config) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *S3Config) GetForcePathStyle() bool { + if x != nil { + return x.ForcePathStyle + } + return false +} + +func (x *S3Config) GetUseSsl() bool { + if x != nil { + return x.UseSsl + } + return false +} + +type BackupConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceMode string `protobuf:"bytes,1,opt,name=source_mode,json=sourceMode,proto3" json:"source_mode,omitempty"` + BackupRoot string `protobuf:"bytes,2,opt,name=backup_root,json=backupRoot,proto3" json:"backup_root,omitempty"` + SqlitePath string `protobuf:"bytes,3,opt,name=sqlite_path,json=sqlitePath,proto3" json:"sqlite_path,omitempty"` + RetentionDays int32 `protobuf:"varint,4,opt,name=retention_days,json=retentionDays,proto3" json:"retention_days,omitempty"` + KeepLast int32 `protobuf:"varint,5,opt,name=keep_last,json=keepLast,proto3" json:"keep_last,omitempty"` + Postgres *SourceConfig `protobuf:"bytes,6,opt,name=postgres,proto3" json:"postgres,omitempty"` + Redis *SourceConfig `protobuf:"bytes,7,opt,name=redis,proto3" json:"redis,omitempty"` + S3 *S3Config `protobuf:"bytes,8,opt,name=s3,proto3" json:"s3,omitempty"` + unknownFields 
protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupConfig) Reset() { + *x = BackupConfig{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupConfig) ProtoMessage() {} + +func (x *BackupConfig) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupConfig.ProtoReflect.Descriptor instead. +func (*BackupConfig) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{4} +} + +func (x *BackupConfig) GetSourceMode() string { + if x != nil { + return x.SourceMode + } + return "" +} + +func (x *BackupConfig) GetBackupRoot() string { + if x != nil { + return x.BackupRoot + } + return "" +} + +func (x *BackupConfig) GetSqlitePath() string { + if x != nil { + return x.SqlitePath + } + return "" +} + +func (x *BackupConfig) GetRetentionDays() int32 { + if x != nil { + return x.RetentionDays + } + return 0 +} + +func (x *BackupConfig) GetKeepLast() int32 { + if x != nil { + return x.KeepLast + } + return 0 +} + +func (x *BackupConfig) GetPostgres() *SourceConfig { + if x != nil { + return x.Postgres + } + return nil +} + +func (x *BackupConfig) GetRedis() *SourceConfig { + if x != nil { + return x.Redis + } + return nil +} + +func (x *BackupConfig) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +type GetConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConfigRequest) Reset() { + *x = 
GetConfigRequest{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConfigRequest) ProtoMessage() {} + +func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConfigRequest.ProtoReflect.Descriptor instead. +func (*GetConfigRequest) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{5} +} + +type GetConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConfigResponse) Reset() { + *x = GetConfigResponse{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConfigResponse) ProtoMessage() {} + +func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConfigResponse.ProtoReflect.Descriptor instead. 
+func (*GetConfigResponse) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{6} +} + +func (x *GetConfigResponse) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpdateConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateConfigRequest) Reset() { + *x = UpdateConfigRequest{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateConfigRequest) ProtoMessage() {} + +func (x *UpdateConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateConfigRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateConfigRequest) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpdateConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateConfigResponse) Reset() { + *x = UpdateConfigResponse{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateConfigResponse) ProtoMessage() {} + +func (x *UpdateConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateConfigResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdateConfigResponse) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type ValidateS3Request struct { + state protoimpl.MessageState `protogen:"open.v1"` + S3 *S3Config `protobuf:"bytes,1,opt,name=s3,proto3" json:"s3,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateS3Request) Reset() { + *x = ValidateS3Request{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateS3Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateS3Request) ProtoMessage() {} + +func (x *ValidateS3Request) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateS3Request.ProtoReflect.Descriptor instead. 
+func (*ValidateS3Request) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{9} +} + +func (x *ValidateS3Request) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +type ValidateS3Response struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateS3Response) Reset() { + *x = ValidateS3Response{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateS3Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateS3Response) ProtoMessage() {} + +func (x *ValidateS3Response) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateS3Response.ProtoReflect.Descriptor instead. 
+func (*ValidateS3Response) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{10} +} + +func (x *ValidateS3Response) GetOk() bool { + if x != nil { + return x.Ok + } + return false +} + +func (x *ValidateS3Response) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type CreateBackupJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + BackupType string `protobuf:"bytes,1,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` + UploadToS3 bool `protobuf:"varint,2,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` + TriggeredBy string `protobuf:"bytes,3,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` + IdempotencyKey string `protobuf:"bytes,4,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateBackupJobRequest) Reset() { + *x = CreateBackupJobRequest{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateBackupJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBackupJobRequest) ProtoMessage() {} + +func (x *CreateBackupJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBackupJobRequest.ProtoReflect.Descriptor instead. 
+func (*CreateBackupJobRequest) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{11} +} + +func (x *CreateBackupJobRequest) GetBackupType() string { + if x != nil { + return x.BackupType + } + return "" +} + +func (x *CreateBackupJobRequest) GetUploadToS3() bool { + if x != nil { + return x.UploadToS3 + } + return false +} + +func (x *CreateBackupJobRequest) GetTriggeredBy() string { + if x != nil { + return x.TriggeredBy + } + return "" +} + +func (x *CreateBackupJobRequest) GetIdempotencyKey() string { + if x != nil { + return x.IdempotencyKey + } + return "" +} + +type BackupArtifact struct { + state protoimpl.MessageState `protogen:"open.v1"` + LocalPath string `protobuf:"bytes,1,opt,name=local_path,json=localPath,proto3" json:"local_path,omitempty"` + SizeBytes int64 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` + Sha256 string `protobuf:"bytes,3,opt,name=sha256,proto3" json:"sha256,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupArtifact) Reset() { + *x = BackupArtifact{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupArtifact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupArtifact) ProtoMessage() {} + +func (x *BackupArtifact) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupArtifact.ProtoReflect.Descriptor instead. 
+func (*BackupArtifact) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{12} +} + +func (x *BackupArtifact) GetLocalPath() string { + if x != nil { + return x.LocalPath + } + return "" +} + +func (x *BackupArtifact) GetSizeBytes() int64 { + if x != nil { + return x.SizeBytes + } + return 0 +} + +func (x *BackupArtifact) GetSha256() string { + if x != nil { + return x.Sha256 + } + return "" +} + +type BackupS3Object struct { + state protoimpl.MessageState `protogen:"open.v1"` + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Etag string `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupS3Object) Reset() { + *x = BackupS3Object{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupS3Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupS3Object) ProtoMessage() {} + +func (x *BackupS3Object) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupS3Object.ProtoReflect.Descriptor instead. 
+func (*BackupS3Object) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{13} +} + +func (x *BackupS3Object) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *BackupS3Object) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *BackupS3Object) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +type BackupJob struct { + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + BackupType string `protobuf:"bytes,2,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + TriggeredBy string `protobuf:"bytes,4,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` + IdempotencyKey string `protobuf:"bytes,5,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` + UploadToS3 bool `protobuf:"varint,6,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` + StartedAt string `protobuf:"bytes,7,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + FinishedAt string `protobuf:"bytes,8,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"` + ErrorMessage string `protobuf:"bytes,9,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + Artifact *BackupArtifact `protobuf:"bytes,10,opt,name=artifact,proto3" json:"artifact,omitempty"` + S3Object *BackupS3Object `protobuf:"bytes,11,opt,name=s3_object,json=s3Object,proto3" json:"s3_object,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupJob) Reset() { + *x = BackupJob{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + 
ms.StoreMessageInfo(mi) +} + +func (x *BackupJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupJob) ProtoMessage() {} + +func (x *BackupJob) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupJob.ProtoReflect.Descriptor instead. +func (*BackupJob) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{14} +} + +func (x *BackupJob) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +func (x *BackupJob) GetBackupType() string { + if x != nil { + return x.BackupType + } + return "" +} + +func (x *BackupJob) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *BackupJob) GetTriggeredBy() string { + if x != nil { + return x.TriggeredBy + } + return "" +} + +func (x *BackupJob) GetIdempotencyKey() string { + if x != nil { + return x.IdempotencyKey + } + return "" +} + +func (x *BackupJob) GetUploadToS3() bool { + if x != nil { + return x.UploadToS3 + } + return false +} + +func (x *BackupJob) GetStartedAt() string { + if x != nil { + return x.StartedAt + } + return "" +} + +func (x *BackupJob) GetFinishedAt() string { + if x != nil { + return x.FinishedAt + } + return "" +} + +func (x *BackupJob) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *BackupJob) GetArtifact() *BackupArtifact { + if x != nil { + return x.Artifact + } + return nil +} + +func (x *BackupJob) GetS3Object() *BackupS3Object { + if x != nil { + return x.S3Object + } + return nil +} + +type CreateBackupJobResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *BackupJob `protobuf:"bytes,1,opt,name=job,proto3" 
json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateBackupJobResponse) Reset() { + *x = CreateBackupJobResponse{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateBackupJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBackupJobResponse) ProtoMessage() {} + +func (x *CreateBackupJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBackupJobResponse.ProtoReflect.Descriptor instead. +func (*CreateBackupJobResponse) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{15} +} + +func (x *CreateBackupJobResponse) GetJob() *BackupJob { + if x != nil { + return x.Job + } + return nil +} + +type ListBackupJobsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + BackupType string `protobuf:"bytes,4,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListBackupJobsRequest) Reset() { + *x = ListBackupJobsRequest{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x 
*ListBackupJobsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBackupJobsRequest) ProtoMessage() {} + +func (x *ListBackupJobsRequest) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBackupJobsRequest.ProtoReflect.Descriptor instead. +func (*ListBackupJobsRequest) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{16} +} + +func (x *ListBackupJobsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListBackupJobsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListBackupJobsRequest) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *ListBackupJobsRequest) GetBackupType() string { + if x != nil { + return x.BackupType + } + return "" +} + +type ListBackupJobsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Items []*BackupJob `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListBackupJobsResponse) Reset() { + *x = ListBackupJobsResponse{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListBackupJobsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBackupJobsResponse) ProtoMessage() {} + +func (x *ListBackupJobsResponse) ProtoReflect() protoreflect.Message { + mi := 
&file_internal_backup_proto_backup_v1_backup_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBackupJobsResponse.ProtoReflect.Descriptor instead. +func (*ListBackupJobsResponse) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{17} +} + +func (x *ListBackupJobsResponse) GetItems() []*BackupJob { + if x != nil { + return x.Items + } + return nil +} + +func (x *ListBackupJobsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +type GetBackupJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetBackupJobRequest) Reset() { + *x = GetBackupJobRequest{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetBackupJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBackupJobRequest) ProtoMessage() {} + +func (x *GetBackupJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBackupJobRequest.ProtoReflect.Descriptor instead. 
+func (*GetBackupJobRequest) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{18} +} + +func (x *GetBackupJobRequest) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +type GetBackupJobResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *BackupJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetBackupJobResponse) Reset() { + *x = GetBackupJobResponse{} + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetBackupJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBackupJobResponse) ProtoMessage() {} + +func (x *GetBackupJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBackupJobResponse.ProtoReflect.Descriptor instead. 
+func (*GetBackupJobResponse) Descriptor() ([]byte, []int) { + return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{19} +} + +func (x *GetBackupJobResponse) GetJob() *BackupJob { + if x != nil { + return x.Job + } + return nil +} + +var File_internal_backup_proto_backup_v1_backup_proto protoreflect.FileDescriptor + +const file_internal_backup_proto_backup_v1_backup_proto_rawDesc = "" + + "\n" + + ",internal/backup/proto/backup/v1/backup.proto\x12\tbackup.v1\"\x0f\n" + + "\rHealthRequest\"i\n" + + "\x0eHealthResponse\x12\x16\n" + + "\x06status\x18\x01 \x01(\tR\x06status\x12\x18\n" + + "\aversion\x18\x02 \x01(\tR\aversion\x12%\n" + + "\x0euptime_seconds\x18\x03 \x01(\x03R\ruptimeSeconds\"\x84\x02\n" + + "\fSourceConfig\x12\x12\n" + + "\x04host\x18\x01 \x01(\tR\x04host\x12\x12\n" + + "\x04port\x18\x02 \x01(\x05R\x04port\x12\x12\n" + + "\x04user\x18\x03 \x01(\tR\x04user\x12\x1a\n" + + "\bpassword\x18\x04 \x01(\tR\bpassword\x12\x1a\n" + + "\bdatabase\x18\x05 \x01(\tR\bdatabase\x12\x19\n" + + "\bssl_mode\x18\x06 \x01(\tR\asslMode\x12\x12\n" + + "\x04addr\x18\a \x01(\tR\x04addr\x12\x1a\n" + + "\busername\x18\b \x01(\tR\busername\x12\x0e\n" + + "\x02db\x18\t \x01(\x05R\x02db\x12%\n" + + "\x0econtainer_name\x18\n" + + " \x01(\tR\rcontainerName\"\x9b\x02\n" + + "\bS3Config\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\x12\x1a\n" + + "\bendpoint\x18\x02 \x01(\tR\bendpoint\x12\x16\n" + + "\x06region\x18\x03 \x01(\tR\x06region\x12\x16\n" + + "\x06bucket\x18\x04 \x01(\tR\x06bucket\x12\"\n" + + "\raccess_key_id\x18\x05 \x01(\tR\vaccessKeyId\x12*\n" + + "\x11secret_access_key\x18\x06 \x01(\tR\x0fsecretAccessKey\x12\x16\n" + + "\x06prefix\x18\a \x01(\tR\x06prefix\x12(\n" + + "\x10force_path_style\x18\b \x01(\bR\x0eforcePathStyle\x12\x17\n" + + "\ause_ssl\x18\t \x01(\bR\x06useSsl\"\xbe\x02\n" + + "\fBackupConfig\x12\x1f\n" + + "\vsource_mode\x18\x01 \x01(\tR\n" + + "sourceMode\x12\x1f\n" + + "\vbackup_root\x18\x02 \x01(\tR\n" + + "backupRoot\x12\x1f\n" 
+ + "\vsqlite_path\x18\x03 \x01(\tR\n" + + "sqlitePath\x12%\n" + + "\x0eretention_days\x18\x04 \x01(\x05R\rretentionDays\x12\x1b\n" + + "\tkeep_last\x18\x05 \x01(\x05R\bkeepLast\x123\n" + + "\bpostgres\x18\x06 \x01(\v2\x17.backup.v1.SourceConfigR\bpostgres\x12-\n" + + "\x05redis\x18\a \x01(\v2\x17.backup.v1.SourceConfigR\x05redis\x12#\n" + + "\x02s3\x18\b \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\"\x12\n" + + "\x10GetConfigRequest\"D\n" + + "\x11GetConfigResponse\x12/\n" + + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"F\n" + + "\x13UpdateConfigRequest\x12/\n" + + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"G\n" + + "\x14UpdateConfigResponse\x12/\n" + + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"8\n" + + "\x11ValidateS3Request\x12#\n" + + "\x02s3\x18\x01 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\">\n" + + "\x12ValidateS3Response\x12\x0e\n" + + "\x02ok\x18\x01 \x01(\bR\x02ok\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\"\xa7\x01\n" + + "\x16CreateBackupJobRequest\x12\x1f\n" + + "\vbackup_type\x18\x01 \x01(\tR\n" + + "backupType\x12 \n" + + "\fupload_to_s3\x18\x02 \x01(\bR\n" + + "uploadToS3\x12!\n" + + "\ftriggered_by\x18\x03 \x01(\tR\vtriggeredBy\x12'\n" + + "\x0fidempotency_key\x18\x04 \x01(\tR\x0eidempotencyKey\"f\n" + + "\x0eBackupArtifact\x12\x1d\n" + + "\n" + + "local_path\x18\x01 \x01(\tR\tlocalPath\x12\x1d\n" + + "\n" + + "size_bytes\x18\x02 \x01(\x03R\tsizeBytes\x12\x16\n" + + "\x06sha256\x18\x03 \x01(\tR\x06sha256\"N\n" + + "\x0eBackupS3Object\x12\x16\n" + + "\x06bucket\x18\x01 \x01(\tR\x06bucket\x12\x10\n" + + "\x03key\x18\x02 \x01(\tR\x03key\x12\x12\n" + + "\x04etag\x18\x03 \x01(\tR\x04etag\"\x9d\x03\n" + + "\tBackupJob\x12\x15\n" + + "\x06job_id\x18\x01 \x01(\tR\x05jobId\x12\x1f\n" + + "\vbackup_type\x18\x02 \x01(\tR\n" + + "backupType\x12\x16\n" + + "\x06status\x18\x03 \x01(\tR\x06status\x12!\n" + + "\ftriggered_by\x18\x04 \x01(\tR\vtriggeredBy\x12'\n" + + 
"\x0fidempotency_key\x18\x05 \x01(\tR\x0eidempotencyKey\x12 \n" + + "\fupload_to_s3\x18\x06 \x01(\bR\n" + + "uploadToS3\x12\x1d\n" + + "\n" + + "started_at\x18\a \x01(\tR\tstartedAt\x12\x1f\n" + + "\vfinished_at\x18\b \x01(\tR\n" + + "finishedAt\x12#\n" + + "\rerror_message\x18\t \x01(\tR\ferrorMessage\x125\n" + + "\bartifact\x18\n" + + " \x01(\v2\x19.backup.v1.BackupArtifactR\bartifact\x126\n" + + "\ts3_object\x18\v \x01(\v2\x19.backup.v1.BackupS3ObjectR\bs3Object\"A\n" + + "\x17CreateBackupJobResponse\x12&\n" + + "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job\"\x8c\x01\n" + + "\x15ListBackupJobsRequest\x12\x1b\n" + + "\tpage_size\x18\x01 \x01(\x05R\bpageSize\x12\x1d\n" + + "\n" + + "page_token\x18\x02 \x01(\tR\tpageToken\x12\x16\n" + + "\x06status\x18\x03 \x01(\tR\x06status\x12\x1f\n" + + "\vbackup_type\x18\x04 \x01(\tR\n" + + "backupType\"l\n" + + "\x16ListBackupJobsResponse\x12*\n" + + "\x05items\x18\x01 \x03(\v2\x14.backup.v1.BackupJobR\x05items\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\tR\rnextPageToken\",\n" + + "\x13GetBackupJobRequest\x12\x15\n" + + "\x06job_id\x18\x01 \x01(\tR\x05jobId\">\n" + + "\x14GetBackupJobResponse\x12&\n" + + "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job2\xb4\x04\n" + + "\rBackupService\x12=\n" + + "\x06Health\x12\x18.backup.v1.HealthRequest\x1a\x19.backup.v1.HealthResponse\x12F\n" + + "\tGetConfig\x12\x1b.backup.v1.GetConfigRequest\x1a\x1c.backup.v1.GetConfigResponse\x12O\n" + + "\fUpdateConfig\x12\x1e.backup.v1.UpdateConfigRequest\x1a\x1f.backup.v1.UpdateConfigResponse\x12I\n" + + "\n" + + "ValidateS3\x12\x1c.backup.v1.ValidateS3Request\x1a\x1d.backup.v1.ValidateS3Response\x12X\n" + + "\x0fCreateBackupJob\x12!.backup.v1.CreateBackupJobRequest\x1a\".backup.v1.CreateBackupJobResponse\x12U\n" + + "\x0eListBackupJobs\x12 .backup.v1.ListBackupJobsRequest\x1a!.backup.v1.ListBackupJobsResponse\x12O\n" + + 
"\fGetBackupJob\x12\x1e.backup.v1.GetBackupJobRequest\x1a\x1f.backup.v1.GetBackupJobResponseBFZDgithub.com/Wei-Shaw/sub2api/internal/backup/proto/backup/v1;backupv1b\x06proto3" + +var ( + file_internal_backup_proto_backup_v1_backup_proto_rawDescOnce sync.Once + file_internal_backup_proto_backup_v1_backup_proto_rawDescData []byte +) + +func file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP() []byte { + file_internal_backup_proto_backup_v1_backup_proto_rawDescOnce.Do(func() { + file_internal_backup_proto_backup_v1_backup_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_internal_backup_proto_backup_v1_backup_proto_rawDesc), len(file_internal_backup_proto_backup_v1_backup_proto_rawDesc))) + }) + return file_internal_backup_proto_backup_v1_backup_proto_rawDescData +} + +var file_internal_backup_proto_backup_v1_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_internal_backup_proto_backup_v1_backup_proto_goTypes = []any{ + (*HealthRequest)(nil), // 0: backup.v1.HealthRequest + (*HealthResponse)(nil), // 1: backup.v1.HealthResponse + (*SourceConfig)(nil), // 2: backup.v1.SourceConfig + (*S3Config)(nil), // 3: backup.v1.S3Config + (*BackupConfig)(nil), // 4: backup.v1.BackupConfig + (*GetConfigRequest)(nil), // 5: backup.v1.GetConfigRequest + (*GetConfigResponse)(nil), // 6: backup.v1.GetConfigResponse + (*UpdateConfigRequest)(nil), // 7: backup.v1.UpdateConfigRequest + (*UpdateConfigResponse)(nil), // 8: backup.v1.UpdateConfigResponse + (*ValidateS3Request)(nil), // 9: backup.v1.ValidateS3Request + (*ValidateS3Response)(nil), // 10: backup.v1.ValidateS3Response + (*CreateBackupJobRequest)(nil), // 11: backup.v1.CreateBackupJobRequest + (*BackupArtifact)(nil), // 12: backup.v1.BackupArtifact + (*BackupS3Object)(nil), // 13: backup.v1.BackupS3Object + (*BackupJob)(nil), // 14: backup.v1.BackupJob + (*CreateBackupJobResponse)(nil), // 15: backup.v1.CreateBackupJobResponse + (*ListBackupJobsRequest)(nil), // 16: 
backup.v1.ListBackupJobsRequest + (*ListBackupJobsResponse)(nil), // 17: backup.v1.ListBackupJobsResponse + (*GetBackupJobRequest)(nil), // 18: backup.v1.GetBackupJobRequest + (*GetBackupJobResponse)(nil), // 19: backup.v1.GetBackupJobResponse +} +var file_internal_backup_proto_backup_v1_backup_proto_depIdxs = []int32{ + 2, // 0: backup.v1.BackupConfig.postgres:type_name -> backup.v1.SourceConfig + 2, // 1: backup.v1.BackupConfig.redis:type_name -> backup.v1.SourceConfig + 3, // 2: backup.v1.BackupConfig.s3:type_name -> backup.v1.S3Config + 4, // 3: backup.v1.GetConfigResponse.config:type_name -> backup.v1.BackupConfig + 4, // 4: backup.v1.UpdateConfigRequest.config:type_name -> backup.v1.BackupConfig + 4, // 5: backup.v1.UpdateConfigResponse.config:type_name -> backup.v1.BackupConfig + 3, // 6: backup.v1.ValidateS3Request.s3:type_name -> backup.v1.S3Config + 12, // 7: backup.v1.BackupJob.artifact:type_name -> backup.v1.BackupArtifact + 13, // 8: backup.v1.BackupJob.s3_object:type_name -> backup.v1.BackupS3Object + 14, // 9: backup.v1.CreateBackupJobResponse.job:type_name -> backup.v1.BackupJob + 14, // 10: backup.v1.ListBackupJobsResponse.items:type_name -> backup.v1.BackupJob + 14, // 11: backup.v1.GetBackupJobResponse.job:type_name -> backup.v1.BackupJob + 0, // 12: backup.v1.BackupService.Health:input_type -> backup.v1.HealthRequest + 5, // 13: backup.v1.BackupService.GetConfig:input_type -> backup.v1.GetConfigRequest + 7, // 14: backup.v1.BackupService.UpdateConfig:input_type -> backup.v1.UpdateConfigRequest + 9, // 15: backup.v1.BackupService.ValidateS3:input_type -> backup.v1.ValidateS3Request + 11, // 16: backup.v1.BackupService.CreateBackupJob:input_type -> backup.v1.CreateBackupJobRequest + 16, // 17: backup.v1.BackupService.ListBackupJobs:input_type -> backup.v1.ListBackupJobsRequest + 18, // 18: backup.v1.BackupService.GetBackupJob:input_type -> backup.v1.GetBackupJobRequest + 1, // 19: backup.v1.BackupService.Health:output_type -> 
backup.v1.HealthResponse + 6, // 20: backup.v1.BackupService.GetConfig:output_type -> backup.v1.GetConfigResponse + 8, // 21: backup.v1.BackupService.UpdateConfig:output_type -> backup.v1.UpdateConfigResponse + 10, // 22: backup.v1.BackupService.ValidateS3:output_type -> backup.v1.ValidateS3Response + 15, // 23: backup.v1.BackupService.CreateBackupJob:output_type -> backup.v1.CreateBackupJobResponse + 17, // 24: backup.v1.BackupService.ListBackupJobs:output_type -> backup.v1.ListBackupJobsResponse + 19, // 25: backup.v1.BackupService.GetBackupJob:output_type -> backup.v1.GetBackupJobResponse + 19, // [19:26] is the sub-list for method output_type + 12, // [12:19] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_internal_backup_proto_backup_v1_backup_proto_init() } +func file_internal_backup_proto_backup_v1_backup_proto_init() { + if File_internal_backup_proto_backup_v1_backup_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_internal_backup_proto_backup_v1_backup_proto_rawDesc), len(file_internal_backup_proto_backup_v1_backup_proto_rawDesc)), + NumEnums: 0, + NumMessages: 20, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_internal_backup_proto_backup_v1_backup_proto_goTypes, + DependencyIndexes: file_internal_backup_proto_backup_v1_backup_proto_depIdxs, + MessageInfos: file_internal_backup_proto_backup_v1_backup_proto_msgTypes, + }.Build() + File_internal_backup_proto_backup_v1_backup_proto = out.File + file_internal_backup_proto_backup_v1_backup_proto_goTypes = nil + file_internal_backup_proto_backup_v1_backup_proto_depIdxs = nil +} diff --git a/backend/internal/backup/proto/backup/v1/backup.proto 
b/backend/internal/backup/proto/backup/v1/backup.proto new file mode 100644 index 000000000..38e015742 --- /dev/null +++ b/backend/internal/backup/proto/backup/v1/backup.proto @@ -0,0 +1,139 @@ +syntax = "proto3"; + +package backup.v1; + +option go_package = "github.com/Wei-Shaw/sub2api/internal/backup/proto/backup/v1;backupv1"; + +service BackupService { + rpc Health(HealthRequest) returns (HealthResponse); + rpc GetConfig(GetConfigRequest) returns (GetConfigResponse); + rpc UpdateConfig(UpdateConfigRequest) returns (UpdateConfigResponse); + rpc ValidateS3(ValidateS3Request) returns (ValidateS3Response); + rpc CreateBackupJob(CreateBackupJobRequest) returns (CreateBackupJobResponse); + rpc ListBackupJobs(ListBackupJobsRequest) returns (ListBackupJobsResponse); + rpc GetBackupJob(GetBackupJobRequest) returns (GetBackupJobResponse); +} + +message HealthRequest {} + +message HealthResponse { + string status = 1; + string version = 2; + int64 uptime_seconds = 3; +} + +message SourceConfig { + string host = 1; + int32 port = 2; + string user = 3; + string password = 4; + string database = 5; + string ssl_mode = 6; + string addr = 7; + string username = 8; + int32 db = 9; + string container_name = 10; +} + +message S3Config { + bool enabled = 1; + string endpoint = 2; + string region = 3; + string bucket = 4; + string access_key_id = 5; + string secret_access_key = 6; + string prefix = 7; + bool force_path_style = 8; + bool use_ssl = 9; +} + +message BackupConfig { + string source_mode = 1; + string backup_root = 2; + string sqlite_path = 3; + int32 retention_days = 4; + int32 keep_last = 5; + SourceConfig postgres = 6; + SourceConfig redis = 7; + S3Config s3 = 8; +} + +message GetConfigRequest {} + +message GetConfigResponse { + BackupConfig config = 1; +} + +message UpdateConfigRequest { + BackupConfig config = 1; +} + +message UpdateConfigResponse { + BackupConfig config = 1; +} + +message ValidateS3Request { + S3Config s3 = 1; +} + +message ValidateS3Response { + 
bool ok = 1; + string message = 2; +} + +message CreateBackupJobRequest { + string backup_type = 1; + bool upload_to_s3 = 2; + string triggered_by = 3; + string idempotency_key = 4; +} + +message BackupArtifact { + string local_path = 1; + int64 size_bytes = 2; + string sha256 = 3; +} + +message BackupS3Object { + string bucket = 1; + string key = 2; + string etag = 3; +} + +message BackupJob { + string job_id = 1; + string backup_type = 2; + string status = 3; + string triggered_by = 4; + string idempotency_key = 5; + bool upload_to_s3 = 6; + string started_at = 7; + string finished_at = 8; + string error_message = 9; + BackupArtifact artifact = 10; + BackupS3Object s3_object = 11; +} + +message CreateBackupJobResponse { + BackupJob job = 1; +} + +message ListBackupJobsRequest { + int32 page_size = 1; + string page_token = 2; + string status = 3; + string backup_type = 4; +} + +message ListBackupJobsResponse { + repeated BackupJob items = 1; + string next_page_token = 2; +} + +message GetBackupJobRequest { + string job_id = 1; +} + +message GetBackupJobResponse { + BackupJob job = 1; +} diff --git a/backend/internal/backup/proto/backup/v1/backup_grpc.pb.go b/backend/internal/backup/proto/backup/v1/backup_grpc.pb.go new file mode 100644 index 000000000..cf86ae813 --- /dev/null +++ b/backend/internal/backup/proto/backup/v1/backup_grpc.pb.go @@ -0,0 +1,349 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.6.0 +// - protoc v6.32.1 +// source: internal/backup/proto/backup/v1/backup.proto + +package backupv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 + +const ( + BackupService_Health_FullMethodName = "/backup.v1.BackupService/Health" + BackupService_GetConfig_FullMethodName = "/backup.v1.BackupService/GetConfig" + BackupService_UpdateConfig_FullMethodName = "/backup.v1.BackupService/UpdateConfig" + BackupService_ValidateS3_FullMethodName = "/backup.v1.BackupService/ValidateS3" + BackupService_CreateBackupJob_FullMethodName = "/backup.v1.BackupService/CreateBackupJob" + BackupService_ListBackupJobs_FullMethodName = "/backup.v1.BackupService/ListBackupJobs" + BackupService_GetBackupJob_FullMethodName = "/backup.v1.BackupService/GetBackupJob" +) + +// BackupServiceClient is the client API for BackupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type BackupServiceClient interface { + Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) + GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) + UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) + ValidateS3(ctx context.Context, in *ValidateS3Request, opts ...grpc.CallOption) (*ValidateS3Response, error) + CreateBackupJob(ctx context.Context, in *CreateBackupJobRequest, opts ...grpc.CallOption) (*CreateBackupJobResponse, error) + ListBackupJobs(ctx context.Context, in *ListBackupJobsRequest, opts ...grpc.CallOption) (*ListBackupJobsResponse, error) + GetBackupJob(ctx context.Context, in *GetBackupJobRequest, opts ...grpc.CallOption) (*GetBackupJobResponse, error) +} + +type backupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewBackupServiceClient(cc grpc.ClientConnInterface) BackupServiceClient { + return &backupServiceClient{cc} +} + +func (c *backupServiceClient) Health(ctx context.Context, in *HealthRequest, opts 
...grpc.CallOption) (*HealthResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HealthResponse) + err := c.cc.Invoke(ctx, BackupService_Health_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetConfigResponse) + err := c.cc.Invoke(ctx, BackupService_GetConfig_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UpdateConfigResponse) + err := c.cc.Invoke(ctx, BackupService_UpdateConfig_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) ValidateS3(ctx context.Context, in *ValidateS3Request, opts ...grpc.CallOption) (*ValidateS3Response, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ValidateS3Response) + err := c.cc.Invoke(ctx, BackupService_ValidateS3_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) CreateBackupJob(ctx context.Context, in *CreateBackupJobRequest, opts ...grpc.CallOption) (*CreateBackupJobResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateBackupJobResponse) + err := c.cc.Invoke(ctx, BackupService_CreateBackupJob_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) ListBackupJobs(ctx context.Context, in *ListBackupJobsRequest, opts ...grpc.CallOption) (*ListBackupJobsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListBackupJobsResponse) + err := c.cc.Invoke(ctx, BackupService_ListBackupJobs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) GetBackupJob(ctx context.Context, in *GetBackupJobRequest, opts ...grpc.CallOption) (*GetBackupJobResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetBackupJobResponse) + err := c.cc.Invoke(ctx, BackupService_GetBackupJob_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BackupServiceServer is the server API for BackupService service. +// All implementations must embed UnimplementedBackupServiceServer +// for forward compatibility. +type BackupServiceServer interface { + Health(context.Context, *HealthRequest) (*HealthResponse, error) + GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) + UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) + ValidateS3(context.Context, *ValidateS3Request) (*ValidateS3Response, error) + CreateBackupJob(context.Context, *CreateBackupJobRequest) (*CreateBackupJobResponse, error) + ListBackupJobs(context.Context, *ListBackupJobsRequest) (*ListBackupJobsResponse, error) + GetBackupJob(context.Context, *GetBackupJobRequest) (*GetBackupJobResponse, error) + mustEmbedUnimplementedBackupServiceServer() +} + +// UnimplementedBackupServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedBackupServiceServer struct{} + +func (UnimplementedBackupServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) { + return nil, status.Error(codes.Unimplemented, "method Health not implemented") +} +func (UnimplementedBackupServiceServer) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetConfig not implemented") +} +func (UnimplementedBackupServiceServer) UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) { + return nil, status.Error(codes.Unimplemented, "method UpdateConfig not implemented") +} +func (UnimplementedBackupServiceServer) ValidateS3(context.Context, *ValidateS3Request) (*ValidateS3Response, error) { + return nil, status.Error(codes.Unimplemented, "method ValidateS3 not implemented") +} +func (UnimplementedBackupServiceServer) CreateBackupJob(context.Context, *CreateBackupJobRequest) (*CreateBackupJobResponse, error) { + return nil, status.Error(codes.Unimplemented, "method CreateBackupJob not implemented") +} +func (UnimplementedBackupServiceServer) ListBackupJobs(context.Context, *ListBackupJobsRequest) (*ListBackupJobsResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ListBackupJobs not implemented") +} +func (UnimplementedBackupServiceServer) GetBackupJob(context.Context, *GetBackupJobRequest) (*GetBackupJobResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetBackupJob not implemented") +} +func (UnimplementedBackupServiceServer) mustEmbedUnimplementedBackupServiceServer() {} +func (UnimplementedBackupServiceServer) testEmbeddedByValue() {} + +// UnsafeBackupServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to BackupServiceServer will +// result in compilation errors. 
+type UnsafeBackupServiceServer interface { + mustEmbedUnimplementedBackupServiceServer() +} + +func RegisterBackupServiceServer(s grpc.ServiceRegistrar, srv BackupServiceServer) { + // If the following call panics, it indicates UnimplementedBackupServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&BackupService_ServiceDesc, srv) +} + +func _BackupService_Health_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).Health(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_Health_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).Health(ctx, req.(*HealthRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).GetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_GetConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).GetConfig(ctx, req.(*GetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_UpdateConfig_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).UpdateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_UpdateConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).UpdateConfig(ctx, req.(*UpdateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_ValidateS3_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateS3Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).ValidateS3(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_ValidateS3_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).ValidateS3(ctx, req.(*ValidateS3Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_CreateBackupJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateBackupJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).CreateBackupJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_CreateBackupJob_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).CreateBackupJob(ctx, req.(*CreateBackupJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_ListBackupJobs_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBackupJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).ListBackupJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_ListBackupJobs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).ListBackupJobs(ctx, req.(*ListBackupJobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_GetBackupJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBackupJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).GetBackupJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_GetBackupJob_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).GetBackupJob(ctx, req.(*GetBackupJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// BackupService_ServiceDesc is the grpc.ServiceDesc for BackupService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var BackupService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "backup.v1.BackupService", + HandlerType: (*BackupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Health", + Handler: _BackupService_Health_Handler, + }, + { + MethodName: "GetConfig", + Handler: _BackupService_GetConfig_Handler, + }, + { + MethodName: "UpdateConfig", + Handler: _BackupService_UpdateConfig_Handler, + }, + { + MethodName: "ValidateS3", + Handler: _BackupService_ValidateS3_Handler, + }, + { + MethodName: "CreateBackupJob", + Handler: _BackupService_CreateBackupJob_Handler, + }, + { + MethodName: "ListBackupJobs", + Handler: _BackupService_ListBackupJobs_Handler, + }, + { + MethodName: "GetBackupJob", + Handler: _BackupService_GetBackupJob_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "internal/backup/proto/backup/v1/backup.proto", +} diff --git a/backend/internal/handler/admin/data_management_handler.go b/backend/internal/handler/admin/data_management_handler.go new file mode 100644 index 000000000..de9545093 --- /dev/null +++ b/backend/internal/handler/admin/data_management_handler.go @@ -0,0 +1,225 @@ +package admin + +import ( + "strconv" + "strings" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/Wei-Shaw/sub2api/internal/pkg/response" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + + "github.com/gin-gonic/gin" +) + +type DataManagementHandler struct { + dataManagementService *service.DataManagementService +} + +func NewDataManagementHandler(dataManagementService *service.DataManagementService) *DataManagementHandler { + return &DataManagementHandler{dataManagementService: dataManagementService} +} + +type TestS3ConnectionRequest struct { + Endpoint string `json:"endpoint"` + Region string `json:"region" 
binding:"required"` + Bucket string `json:"bucket" binding:"required"` + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + Prefix string `json:"prefix"` + ForcePathStyle bool `json:"force_path_style"` + UseSSL bool `json:"use_ssl"` +} + +type CreateBackupJobRequest struct { + BackupType string `json:"backup_type" binding:"required,oneof=postgres redis full"` + UploadToS3 bool `json:"upload_to_s3"` + IdempotencyKey string `json:"idempotency_key"` +} + +func (h *DataManagementHandler) GetAgentHealth(c *gin.Context) { + health := h.getAgentHealth(c) + payload := gin.H{ + "enabled": health.Enabled, + "reason": health.Reason, + "socket_path": health.SocketPath, + } + if health.Agent != nil { + payload["agent"] = gin.H{ + "status": health.Agent.Status, + "version": health.Agent.Version, + "uptime_seconds": health.Agent.UptimeSeconds, + } + } + response.Success(c, payload) +} + +func (h *DataManagementHandler) GetConfig(c *gin.Context) { + if !h.requireAgentEnabled(c) { + return + } + cfg, err := h.dataManagementService.GetConfig(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, cfg) +} + +func (h *DataManagementHandler) UpdateConfig(c *gin.Context) { + var req service.DataManagementConfig + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if !h.requireAgentEnabled(c) { + return + } + cfg, err := h.dataManagementService.UpdateConfig(c.Request.Context(), req) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, cfg) +} + +func (h *DataManagementHandler) TestS3(c *gin.Context) { + var req TestS3ConnectionRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if !h.requireAgentEnabled(c) { + return + } + result, err := h.dataManagementService.ValidateS3(c.Request.Context(), 
service.DataManagementS3Config{ + Enabled: true, + Endpoint: req.Endpoint, + Region: req.Region, + Bucket: req.Bucket, + AccessKeyID: req.AccessKeyID, + SecretAccessKey: req.SecretAccessKey, + Prefix: req.Prefix, + ForcePathStyle: req.ForcePathStyle, + UseSSL: req.UseSSL, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, gin.H{"ok": result.OK, "message": result.Message}) +} + +func (h *DataManagementHandler) CreateBackupJob(c *gin.Context) { + var req CreateBackupJobRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + req.IdempotencyKey = normalizeBackupIdempotencyKey(c.GetHeader("X-Idempotency-Key"), req.IdempotencyKey) + if !h.requireAgentEnabled(c) { + return + } + + triggeredBy := "admin:unknown" + if subject, ok := middleware2.GetAuthSubjectFromContext(c); ok { + triggeredBy = "admin:" + strconv.FormatInt(subject.UserID, 10) + } + job, err := h.dataManagementService.CreateBackupJob(c.Request.Context(), service.DataManagementCreateBackupJobInput{ + BackupType: req.BackupType, + UploadToS3: req.UploadToS3, + TriggeredBy: triggeredBy, + IdempotencyKey: req.IdempotencyKey, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, gin.H{"job_id": job.JobID, "status": job.Status}) +} + +func (h *DataManagementHandler) ListBackupJobs(c *gin.Context) { + if !h.requireAgentEnabled(c) { + return + } + + pageSize := int32(20) + if raw := strings.TrimSpace(c.Query("page_size")); raw != "" { + v, err := strconv.Atoi(raw) + if err != nil || v <= 0 { + response.BadRequest(c, "Invalid page_size") + return + } + pageSize = int32(v) + } + + result, err := h.dataManagementService.ListBackupJobs(c.Request.Context(), service.DataManagementListBackupJobsInput{ + PageSize: pageSize, + PageToken: c.Query("page_token"), + Status: c.Query("status"), + BackupType: c.Query("backup_type"), + }) + if err != nil { + response.ErrorFrom(c, err) + 
return + } + response.Success(c, result) +} + +func (h *DataManagementHandler) GetBackupJob(c *gin.Context) { + jobID := strings.TrimSpace(c.Param("job_id")) + if jobID == "" { + response.BadRequest(c, "Invalid backup job ID") + return + } + + if !h.requireAgentEnabled(c) { + return + } + job, err := h.dataManagementService.GetBackupJob(c.Request.Context(), jobID) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, job) +} + +func (h *DataManagementHandler) requireAgentEnabled(c *gin.Context) bool { + if h.dataManagementService == nil { + err := infraerrors.ServiceUnavailable( + service.BackupAgentUnavailableReason, + "backup agent service is not configured", + ).WithMetadata(map[string]string{"socket_path": service.DefaultBackupAgentSocketPath}) + response.ErrorFrom(c, err) + return false + } + + if err := h.dataManagementService.EnsureAgentEnabled(c.Request.Context()); err != nil { + response.ErrorFrom(c, err) + return false + } + + return true +} + +func (h *DataManagementHandler) getAgentHealth(c *gin.Context) service.DataManagementAgentHealth { + if h.dataManagementService == nil { + return service.DataManagementAgentHealth{ + Enabled: false, + Reason: service.BackupAgentUnavailableReason, + SocketPath: service.DefaultBackupAgentSocketPath, + } + } + return h.dataManagementService.GetAgentHealth(c.Request.Context()) +} + +func normalizeBackupIdempotencyKey(headerValue, bodyValue string) string { + headerKey := strings.TrimSpace(headerValue) + if headerKey != "" { + return headerKey + } + return strings.TrimSpace(bodyValue) +} diff --git a/backend/internal/handler/admin/data_management_handler_test.go b/backend/internal/handler/admin/data_management_handler_test.go new file mode 100644 index 000000000..235b4bd0c --- /dev/null +++ b/backend/internal/handler/admin/data_management_handler_test.go @@ -0,0 +1,78 @@ +package admin + +import ( + "encoding/json" + "net/http" + "net/http/httptest" + "path/filepath" + "testing" + "time" + + 
"github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +type apiEnvelope struct { + Code int `json:"code"` + Message string `json:"message"` + Reason string `json:"reason"` + Data json.RawMessage `json:"data"` +} + +func TestDataManagementHandler_AgentHealthAlways200(t *testing.T) { + gin.SetMode(gin.TestMode) + + svc := service.NewDataManagementServiceWithOptions(filepath.Join(t.TempDir(), "missing.sock"), 50*time.Millisecond) + h := NewDataManagementHandler(svc) + + r := gin.New() + r.GET("/api/v1/admin/data-management/agent/health", h.GetAgentHealth) + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/data-management/agent/health", nil) + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) + + var envelope apiEnvelope + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &envelope)) + require.Equal(t, 0, envelope.Code) + + var data struct { + Enabled bool `json:"enabled"` + Reason string `json:"reason"` + SocketPath string `json:"socket_path"` + } + require.NoError(t, json.Unmarshal(envelope.Data, &data)) + require.False(t, data.Enabled) + require.Equal(t, service.BackupAgentSocketMissingReason, data.Reason) + require.Equal(t, svc.SocketPath(), data.SocketPath) +} + +func TestDataManagementHandler_NonHealthRouteReturns503WhenDisabled(t *testing.T) { + gin.SetMode(gin.TestMode) + + svc := service.NewDataManagementServiceWithOptions(filepath.Join(t.TempDir(), "missing.sock"), 50*time.Millisecond) + h := NewDataManagementHandler(svc) + + r := gin.New() + r.GET("/api/v1/admin/data-management/config", h.GetConfig) + + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodGet, "/api/v1/admin/data-management/config", nil) + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusServiceUnavailable, rec.Code) + + var envelope apiEnvelope + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &envelope)) + require.Equal(t, 
http.StatusServiceUnavailable, envelope.Code) + require.Equal(t, service.BackupAgentSocketMissingReason, envelope.Reason) +} + +func TestNormalizeBackupIdempotencyKey(t *testing.T) { + require.Equal(t, "from-header", normalizeBackupIdempotencyKey("from-header", "from-body")) + require.Equal(t, "from-body", normalizeBackupIdempotencyKey(" ", " from-body ")) + require.Equal(t, "", normalizeBackupIdempotencyKey("", "")) +} diff --git a/backend/internal/handler/handler.go b/backend/internal/handler/handler.go index b999180b6..61fc0dc4e 100644 --- a/backend/internal/handler/handler.go +++ b/backend/internal/handler/handler.go @@ -11,6 +11,7 @@ type AdminHandlers struct { Group *admin.GroupHandler Account *admin.AccountHandler Announcement *admin.AnnouncementHandler + DataManagement *admin.DataManagementHandler OAuth *admin.OAuthHandler OpenAIOAuth *admin.OpenAIOAuthHandler GeminiOAuth *admin.GeminiOAuthHandler diff --git a/backend/internal/handler/wire.go b/backend/internal/handler/wire.go index 79d583fde..39a3cc50b 100644 --- a/backend/internal/handler/wire.go +++ b/backend/internal/handler/wire.go @@ -14,6 +14,7 @@ func ProvideAdminHandlers( groupHandler *admin.GroupHandler, accountHandler *admin.AccountHandler, announcementHandler *admin.AnnouncementHandler, + dataManagementHandler *admin.DataManagementHandler, oauthHandler *admin.OAuthHandler, openaiOAuthHandler *admin.OpenAIOAuthHandler, geminiOAuthHandler *admin.GeminiOAuthHandler, @@ -35,6 +36,7 @@ func ProvideAdminHandlers( Group: groupHandler, Account: accountHandler, Announcement: announcementHandler, + DataManagement: dataManagementHandler, OAuth: oauthHandler, OpenAIOAuth: openaiOAuthHandler, GeminiOAuth: geminiOAuthHandler, @@ -119,6 +121,7 @@ var ProviderSet = wire.NewSet( admin.NewGroupHandler, admin.NewAccountHandler, admin.NewAnnouncementHandler, + admin.NewDataManagementHandler, admin.NewOAuthHandler, admin.NewOpenAIOAuthHandler, admin.NewGeminiOAuthHandler, diff --git 
a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index 4b4d97c34..6e25835dc 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -55,6 +55,9 @@ func RegisterAdminRoutes( // 系统设置 registerSettingsRoutes(admin, h) + // 数据管理 + registerDataManagementRoutes(admin, h) + // 运维监控(Ops) registerOpsRoutes(admin, h) @@ -372,6 +375,19 @@ func registerSettingsRoutes(admin *gin.RouterGroup, h *handler.Handlers) { } } +func registerDataManagementRoutes(admin *gin.RouterGroup, h *handler.Handlers) { + dataManagement := admin.Group("/data-management") + { + dataManagement.GET("/agent/health", h.Admin.DataManagement.GetAgentHealth) + dataManagement.GET("/config", h.Admin.DataManagement.GetConfig) + dataManagement.PUT("/config", h.Admin.DataManagement.UpdateConfig) + dataManagement.POST("/s3/test", h.Admin.DataManagement.TestS3) + dataManagement.POST("/backups", h.Admin.DataManagement.CreateBackupJob) + dataManagement.GET("/backups", h.Admin.DataManagement.ListBackupJobs) + dataManagement.GET("/backups/:job_id", h.Admin.DataManagement.GetBackupJob) + } +} + func registerSystemRoutes(admin *gin.RouterGroup, h *handler.Handlers) { system := admin.Group("/system") { diff --git a/backend/internal/service/data_management_grpc.go b/backend/internal/service/data_management_grpc.go new file mode 100644 index 000000000..61c0e5cc5 --- /dev/null +++ b/backend/internal/service/data_management_grpc.go @@ -0,0 +1,533 @@ +package service + +import ( + "context" + "fmt" + "net" + "strings" + "time" + + backupv1 "github.com/Wei-Shaw/sub2api/internal/backup/proto/backup/v1" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" + "google.golang.org/grpc/metadata" + grpcstatus "google.golang.org/grpc/status" +) + +const ( + backupInvalidArgumentReason = 
"BACKUP_INVALID_ARGUMENT" + backupResourceNotFoundReason = "BACKUP_RESOURCE_NOT_FOUND" + backupFailedPrecondition = "BACKUP_FAILED_PRECONDITION" + backupAgentTimeoutReason = "BACKUP_AGENT_TIMEOUT" + backupAgentInternalReason = "BACKUP_AGENT_INTERNAL" + defaultBackupRPCTimeout = 8 * time.Second +) + +type DataManagementPostgresConfig struct { + Host string `json:"host"` + Port int32 `json:"port"` + User string `json:"user"` + Password string `json:"password,omitempty"` + PasswordConfigured bool `json:"password_configured"` + Database string `json:"database"` + SSLMode string `json:"ssl_mode"` + ContainerName string `json:"container_name"` +} + +type DataManagementRedisConfig struct { + Addr string `json:"addr"` + Username string `json:"username"` + Password string `json:"password,omitempty"` + PasswordConfigured bool `json:"password_configured"` + DB int32 `json:"db"` + ContainerName string `json:"container_name"` +} + +type DataManagementS3Config struct { + Enabled bool `json:"enabled"` + Endpoint string `json:"endpoint"` + Region string `json:"region"` + Bucket string `json:"bucket"` + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key,omitempty"` + SecretAccessKeyConfigured bool `json:"secret_access_key_configured"` + Prefix string `json:"prefix"` + ForcePathStyle bool `json:"force_path_style"` + UseSSL bool `json:"use_ssl"` +} + +type DataManagementConfig struct { + SourceMode string `json:"source_mode"` + BackupRoot string `json:"backup_root"` + SQLitePath string `json:"sqlite_path,omitempty"` + RetentionDays int32 `json:"retention_days"` + KeepLast int32 `json:"keep_last"` + Postgres DataManagementPostgresConfig `json:"postgres"` + Redis DataManagementRedisConfig `json:"redis"` + S3 DataManagementS3Config `json:"s3"` +} + +type DataManagementTestS3Result struct { + OK bool `json:"ok"` + Message string `json:"message"` +} + +type DataManagementCreateBackupJobInput struct { + BackupType string + UploadToS3 bool + 
TriggeredBy string + IdempotencyKey string +} + +type DataManagementListBackupJobsInput struct { + PageSize int32 + PageToken string + Status string + BackupType string +} + +type DataManagementArtifactInfo struct { + LocalPath string `json:"local_path"` + SizeBytes int64 `json:"size_bytes"` + SHA256 string `json:"sha256"` +} + +type DataManagementS3ObjectInfo struct { + Bucket string `json:"bucket"` + Key string `json:"key"` + ETag string `json:"etag"` +} + +type DataManagementBackupJob struct { + JobID string `json:"job_id"` + BackupType string `json:"backup_type"` + Status string `json:"status"` + TriggeredBy string `json:"triggered_by"` + IdempotencyKey string `json:"idempotency_key,omitempty"` + UploadToS3 bool `json:"upload_to_s3"` + StartedAt string `json:"started_at,omitempty"` + FinishedAt string `json:"finished_at,omitempty"` + ErrorMessage string `json:"error_message,omitempty"` + Artifact DataManagementArtifactInfo `json:"artifact"` + S3Object DataManagementS3ObjectInfo `json:"s3"` +} + +type DataManagementListBackupJobsResult struct { + Items []DataManagementBackupJob `json:"items"` + NextPageToken string `json:"next_page_token,omitempty"` +} + +func (s *DataManagementService) GetConfig(ctx context.Context) (DataManagementConfig, error) { + var resp *backupv1.GetConfigResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.GetConfig(callCtx, &backupv1.GetConfigRequest{}) + return callErr + }) + if err != nil { + return DataManagementConfig{}, err + } + return mapProtoConfig(resp.GetConfig()), nil +} + +func (s *DataManagementService) UpdateConfig(ctx context.Context, cfg DataManagementConfig) (DataManagementConfig, error) { + if err := validateDataManagementConfig(cfg); err != nil { + return DataManagementConfig{}, err + } + + var resp *backupv1.UpdateConfigResponse + err := s.withClient(ctx, func(callCtx context.Context, client 
backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.UpdateConfig(callCtx, &backupv1.UpdateConfigRequest{Config: mapToProtoConfig(cfg)}) + return callErr + }) + if err != nil { + return DataManagementConfig{}, err + } + return mapProtoConfig(resp.GetConfig()), nil +} + +func (s *DataManagementService) ValidateS3(ctx context.Context, cfg DataManagementS3Config) (DataManagementTestS3Result, error) { + if err := validateS3Config(cfg); err != nil { + return DataManagementTestS3Result{}, err + } + + var resp *backupv1.ValidateS3Response + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.ValidateS3(callCtx, &backupv1.ValidateS3Request{ + S3: &backupv1.S3Config{ + Enabled: cfg.Enabled, + Endpoint: strings.TrimSpace(cfg.Endpoint), + Region: strings.TrimSpace(cfg.Region), + Bucket: strings.TrimSpace(cfg.Bucket), + AccessKeyId: strings.TrimSpace(cfg.AccessKeyID), + SecretAccessKey: strings.TrimSpace(cfg.SecretAccessKey), + Prefix: strings.Trim(strings.TrimSpace(cfg.Prefix), "/"), + ForcePathStyle: cfg.ForcePathStyle, + UseSsl: cfg.UseSSL, + }, + }) + return callErr + }) + if err != nil { + return DataManagementTestS3Result{}, err + } + return DataManagementTestS3Result{OK: resp.GetOk(), Message: resp.GetMessage()}, nil +} + +func (s *DataManagementService) CreateBackupJob(ctx context.Context, input DataManagementCreateBackupJobInput) (DataManagementBackupJob, error) { + var resp *backupv1.CreateBackupJobResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.CreateBackupJob(callCtx, &backupv1.CreateBackupJobRequest{ + BackupType: strings.TrimSpace(input.BackupType), + UploadToS3: input.UploadToS3, + TriggeredBy: strings.TrimSpace(input.TriggeredBy), + IdempotencyKey: strings.TrimSpace(input.IdempotencyKey), + }) + return callErr + }) + if err != nil { + 
return DataManagementBackupJob{}, err + } + return mapProtoJob(resp.GetJob()), nil +} + +func (s *DataManagementService) ListBackupJobs(ctx context.Context, input DataManagementListBackupJobsInput) (DataManagementListBackupJobsResult, error) { + var resp *backupv1.ListBackupJobsResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.ListBackupJobs(callCtx, &backupv1.ListBackupJobsRequest{ + PageSize: input.PageSize, + PageToken: strings.TrimSpace(input.PageToken), + Status: strings.TrimSpace(input.Status), + BackupType: strings.TrimSpace(input.BackupType), + }) + return callErr + }) + if err != nil { + return DataManagementListBackupJobsResult{}, err + } + + items := make([]DataManagementBackupJob, 0, len(resp.GetItems())) + for _, item := range resp.GetItems() { + items = append(items, mapProtoJob(item)) + } + return DataManagementListBackupJobsResult{Items: items, NextPageToken: resp.GetNextPageToken()}, nil +} + +func (s *DataManagementService) GetBackupJob(ctx context.Context, jobID string) (DataManagementBackupJob, error) { + var resp *backupv1.GetBackupJobResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.GetBackupJob(callCtx, &backupv1.GetBackupJobRequest{JobId: strings.TrimSpace(jobID)}) + return callErr + }) + if err != nil { + return DataManagementBackupJob{}, err + } + return mapProtoJob(resp.GetJob()), nil +} + +func (s *DataManagementService) withClient(ctx context.Context, call func(context.Context, backupv1.BackupServiceClient) error) error { + if err := s.EnsureAgentEnabled(ctx); err != nil { + return err + } + + socketPath := s.SocketPath() + dialCtx, dialCancel := context.WithTimeout(ctx, s.dialTimeout) + defer dialCancel() + + conn, err := grpc.DialContext( + dialCtx, + "unix://"+socketPath, + 
grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithBlock(), + grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) { + dialer := net.Dialer{Timeout: s.dialTimeout} + return dialer.DialContext(ctx, "unix", socketPath) + }), + ) + if err != nil { + return ErrBackupAgentUnavailable.WithMetadata(map[string]string{"socket_path": socketPath}).WithCause(err) + } + defer func() { + _ = conn.Close() + }() + + client := backupv1.NewBackupServiceClient(conn) + callCtx, callCancel := context.WithTimeout(ctx, defaultBackupRPCTimeout) + defer callCancel() + if requestID := requestIDFromContext(ctx); requestID != "" { + callCtx = metadata.AppendToOutgoingContext(callCtx, "x-request-id", requestID) + } + + if err := call(callCtx, client); err != nil { + return mapBackupGRPCError(err, socketPath) + } + return nil +} + +func mapBackupGRPCError(err error, socketPath string) error { + if err == nil { + return nil + } + + st, ok := grpcstatus.FromError(err) + if !ok { + return infraerrors.InternalServer(backupAgentInternalReason, "backup agent call failed"). + WithMetadata(map[string]string{"socket_path": socketPath}). + WithCause(err) + } + + switch st.Code() { + case codes.InvalidArgument: + return infraerrors.BadRequest(backupInvalidArgumentReason, st.Message()) + case codes.NotFound: + return infraerrors.NotFound(backupResourceNotFoundReason, st.Message()) + case codes.FailedPrecondition: + return infraerrors.New(412, backupFailedPrecondition, st.Message()) + case codes.Unavailable: + return infraerrors.ServiceUnavailable(BackupAgentUnavailableReason, st.Message()). + WithMetadata(map[string]string{"socket_path": socketPath}) + case codes.DeadlineExceeded: + return infraerrors.GatewayTimeout(backupAgentTimeoutReason, st.Message()) + default: + return infraerrors.InternalServer(backupAgentInternalReason, st.Message()). 
+ WithMetadata(map[string]string{ + "socket_path": socketPath, + "grpc_code": st.Code().String(), + }) + } +} + +func mapProtoConfig(cfg *backupv1.BackupConfig) DataManagementConfig { + if cfg == nil { + return DataManagementConfig{} + } + postgres := cfg.GetPostgres() + redis := cfg.GetRedis() + s3Cfg := cfg.GetS3() + return DataManagementConfig{ + SourceMode: cfg.GetSourceMode(), + BackupRoot: cfg.GetBackupRoot(), + SQLitePath: cfg.GetSqlitePath(), + RetentionDays: cfg.GetRetentionDays(), + KeepLast: cfg.GetKeepLast(), + Postgres: DataManagementPostgresConfig{ + Host: postgres.GetHost(), + Port: postgres.GetPort(), + User: postgres.GetUser(), + PasswordConfigured: strings.TrimSpace(postgres.GetPassword()) != "", + Database: postgres.GetDatabase(), + SSLMode: postgres.GetSslMode(), + ContainerName: postgres.GetContainerName(), + }, + Redis: DataManagementRedisConfig{ + Addr: redis.GetAddr(), + Username: redis.GetUsername(), + PasswordConfigured: strings.TrimSpace(redis.GetPassword()) != "", + DB: redis.GetDb(), + ContainerName: redis.GetContainerName(), + }, + S3: DataManagementS3Config{ + Enabled: s3Cfg.GetEnabled(), + Endpoint: s3Cfg.GetEndpoint(), + Region: s3Cfg.GetRegion(), + Bucket: s3Cfg.GetBucket(), + AccessKeyID: s3Cfg.GetAccessKeyId(), + SecretAccessKeyConfigured: strings.TrimSpace(s3Cfg.GetSecretAccessKey()) != "", + Prefix: s3Cfg.GetPrefix(), + ForcePathStyle: s3Cfg.GetForcePathStyle(), + UseSSL: s3Cfg.GetUseSsl(), + }, + } +} + +func mapToProtoConfig(cfg DataManagementConfig) *backupv1.BackupConfig { + return &backupv1.BackupConfig{ + SourceMode: strings.TrimSpace(cfg.SourceMode), + BackupRoot: strings.TrimSpace(cfg.BackupRoot), + SqlitePath: strings.TrimSpace(cfg.SQLitePath), + RetentionDays: cfg.RetentionDays, + KeepLast: cfg.KeepLast, + Postgres: &backupv1.SourceConfig{ + Host: strings.TrimSpace(cfg.Postgres.Host), + Port: cfg.Postgres.Port, + User: strings.TrimSpace(cfg.Postgres.User), + Password: strings.TrimSpace(cfg.Postgres.Password), + 
Database: strings.TrimSpace(cfg.Postgres.Database), + SslMode: strings.TrimSpace(cfg.Postgres.SSLMode), + ContainerName: strings.TrimSpace(cfg.Postgres.ContainerName), + }, + Redis: &backupv1.SourceConfig{ + Addr: strings.TrimSpace(cfg.Redis.Addr), + Username: strings.TrimSpace(cfg.Redis.Username), + Password: strings.TrimSpace(cfg.Redis.Password), + Db: cfg.Redis.DB, + ContainerName: strings.TrimSpace(cfg.Redis.ContainerName), + }, + S3: &backupv1.S3Config{ + Enabled: cfg.S3.Enabled, + Endpoint: strings.TrimSpace(cfg.S3.Endpoint), + Region: strings.TrimSpace(cfg.S3.Region), + Bucket: strings.TrimSpace(cfg.S3.Bucket), + AccessKeyId: strings.TrimSpace(cfg.S3.AccessKeyID), + SecretAccessKey: strings.TrimSpace(cfg.S3.SecretAccessKey), + Prefix: strings.Trim(strings.TrimSpace(cfg.S3.Prefix), "/"), + ForcePathStyle: cfg.S3.ForcePathStyle, + UseSsl: cfg.S3.UseSSL, + }, + } +} + +func mapProtoJob(job *backupv1.BackupJob) DataManagementBackupJob { + if job == nil { + return DataManagementBackupJob{} + } + artifact := job.GetArtifact() + s3Object := job.GetS3Object() + artifactOut := DataManagementArtifactInfo{} + if artifact != nil { + artifactOut = DataManagementArtifactInfo{ + LocalPath: artifact.GetLocalPath(), + SizeBytes: artifact.GetSizeBytes(), + SHA256: artifact.GetSha256(), + } + } + s3Out := DataManagementS3ObjectInfo{} + if s3Object != nil { + s3Out = DataManagementS3ObjectInfo{ + Bucket: s3Object.GetBucket(), + Key: s3Object.GetKey(), + ETag: s3Object.GetEtag(), + } + } + + return DataManagementBackupJob{ + JobID: job.GetJobId(), + BackupType: job.GetBackupType(), + Status: job.GetStatus(), + TriggeredBy: job.GetTriggeredBy(), + IdempotencyKey: job.GetIdempotencyKey(), + UploadToS3: job.GetUploadToS3(), + StartedAt: job.GetStartedAt(), + FinishedAt: job.GetFinishedAt(), + ErrorMessage: job.GetErrorMessage(), + Artifact: artifactOut, + S3Object: s3Out, + } +} + +func validateDataManagementConfig(cfg DataManagementConfig) error { + sourceMode := 
strings.TrimSpace(cfg.SourceMode) + if sourceMode != "direct" && sourceMode != "docker_exec" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "source_mode must be direct or docker_exec") + } + if strings.TrimSpace(cfg.BackupRoot) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "backup_root is required") + } + if cfg.RetentionDays <= 0 { + return infraerrors.BadRequest(backupInvalidArgumentReason, "retention_days must be > 0") + } + if cfg.KeepLast <= 0 { + return infraerrors.BadRequest(backupInvalidArgumentReason, "keep_last must be > 0") + } + + if strings.TrimSpace(cfg.Postgres.Database) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "postgres.database is required") + } + if cfg.Postgres.Port <= 0 { + return infraerrors.BadRequest(backupInvalidArgumentReason, "postgres.port must be > 0") + } + if sourceMode == "docker_exec" { + if strings.TrimSpace(cfg.Postgres.ContainerName) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "postgres.container_name is required in docker_exec mode") + } + if strings.TrimSpace(cfg.Redis.ContainerName) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "redis.container_name is required in docker_exec mode") + } + } else { + if strings.TrimSpace(cfg.Postgres.Host) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "postgres.host is required in direct mode") + } + if strings.TrimSpace(cfg.Redis.Addr) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "redis.addr is required in direct mode") + } + } + + if cfg.Redis.DB < 0 { + return infraerrors.BadRequest(backupInvalidArgumentReason, "redis.db must be >= 0") + } + + if cfg.S3.Enabled { + if err := validateS3Config(cfg.S3); err != nil { + return err + } + } + return nil +} + +func validateS3Config(cfg DataManagementS3Config) error { + if strings.TrimSpace(cfg.Region) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "s3.region is 
required") + } + if strings.TrimSpace(cfg.Bucket) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "s3.bucket is required") + } + return nil +} + +func (s *DataManagementService) probeBackupHealth(ctx context.Context) (*DataManagementAgentInfo, error) { + socketPath := s.SocketPath() + dialCtx, dialCancel := context.WithTimeout(ctx, s.dialTimeout) + defer dialCancel() + + conn, err := grpc.DialContext( + dialCtx, + "unix://"+socketPath, + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithBlock(), + grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) { + dialer := net.Dialer{Timeout: s.dialTimeout} + return dialer.DialContext(ctx, "unix", socketPath) + }), + ) + if err != nil { + return nil, err + } + defer func() { + _ = conn.Close() + }() + + callCtx, callCancel := context.WithTimeout(ctx, s.dialTimeout) + defer callCancel() + if requestID := requestIDFromContext(ctx); requestID != "" { + callCtx = metadata.AppendToOutgoingContext(callCtx, "x-request-id", requestID) + } + resp, err := backupv1.NewBackupServiceClient(conn).Health(callCtx, &backupv1.HealthRequest{}) + if err != nil { + return nil, err + } + statusText := strings.TrimSpace(resp.GetStatus()) + if statusText == "" { + return nil, fmt.Errorf("empty backup health status") + } + return &DataManagementAgentInfo{ + Status: statusText, + Version: strings.TrimSpace(resp.GetVersion()), + UptimeSeconds: resp.GetUptimeSeconds(), + }, nil +} + +func requestIDFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + value, _ := ctx.Value(ctxkey.RequestID).(string) + return strings.TrimSpace(value) +} diff --git a/backend/internal/service/data_management_grpc_test.go b/backend/internal/service/data_management_grpc_test.go new file mode 100644 index 000000000..b65379b1a --- /dev/null +++ b/backend/internal/service/data_management_grpc_test.go @@ -0,0 +1,122 @@ +package service + +import ( + "errors" + "testing" + + infraerrors 
"github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" +) + +func TestMapBackupGRPCError(t *testing.T) { + t.Parallel() + + socketPath := "/tmp/sub2api-backup.sock" + testCases := []struct { + name string + err error + wantCode int + wantReason string + }{ + { + name: "invalid argument", + err: grpcstatus.Error(codes.InvalidArgument, "bad request"), + wantCode: 400, + wantReason: backupInvalidArgumentReason, + }, + { + name: "not found", + err: grpcstatus.Error(codes.NotFound, "not found"), + wantCode: 404, + wantReason: backupResourceNotFoundReason, + }, + { + name: "failed precondition", + err: grpcstatus.Error(codes.FailedPrecondition, "precondition failed"), + wantCode: 412, + wantReason: backupFailedPrecondition, + }, + { + name: "unavailable", + err: grpcstatus.Error(codes.Unavailable, "agent unavailable"), + wantCode: 503, + wantReason: BackupAgentUnavailableReason, + }, + { + name: "deadline exceeded", + err: grpcstatus.Error(codes.DeadlineExceeded, "timeout"), + wantCode: 504, + wantReason: backupAgentTimeoutReason, + }, + { + name: "internal fallback", + err: grpcstatus.Error(codes.Internal, "internal"), + wantCode: 500, + wantReason: backupAgentInternalReason, + }, + { + name: "non grpc error", + err: errors.New("plain error"), + wantCode: 500, + wantReason: backupAgentInternalReason, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + mapped := mapBackupGRPCError(tc.err, socketPath) + statusCode, body := infraerrors.ToHTTP(mapped) + + require.Equal(t, tc.wantCode, statusCode) + require.Equal(t, tc.wantReason, body.Reason) + + if tc.wantCode == 503 || tc.wantCode == 500 { + require.Equal(t, socketPath, body.Metadata["socket_path"]) + } + }) + } +} + +func TestValidateDataManagementConfig(t *testing.T) { + t.Parallel() + + valid := DataManagementConfig{ + SourceMode: "direct", + 
BackupRoot: "/var/lib/sub2api/backups", + RetentionDays: 7, + KeepLast: 30, + Postgres: DataManagementPostgresConfig{ + Host: "127.0.0.1", + Port: 5432, + Database: "sub2api", + }, + Redis: DataManagementRedisConfig{ + Addr: "127.0.0.1:6379", + DB: 0, + }, + S3: DataManagementS3Config{ + Enabled: false, + }, + } + + require.NoError(t, validateDataManagementConfig(valid)) + + invalidMode := valid + invalidMode.SourceMode = "invalid" + require.Error(t, validateDataManagementConfig(invalidMode)) + + dockerMissingContainer := valid + dockerMissingContainer.SourceMode = "docker_exec" + require.Error(t, validateDataManagementConfig(dockerMissingContainer)) + + s3EnabledMissingBucket := valid + s3EnabledMissingBucket.S3.Enabled = true + s3EnabledMissingBucket.S3.Region = "us-east-1" + s3EnabledMissingBucket.S3.Bucket = "" + require.Error(t, validateDataManagementConfig(s3EnabledMissingBucket)) +} diff --git a/backend/internal/service/data_management_service.go b/backend/internal/service/data_management_service.go new file mode 100644 index 000000000..1047a891c --- /dev/null +++ b/backend/internal/service/data_management_service.go @@ -0,0 +1,123 @@ +package service + +import ( + "context" + "errors" + "net" + "os" + "strings" + "time" + + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" +) + +const ( + DefaultBackupAgentSocketPath = "/tmp/sub2api-backup.sock" + + BackupAgentSocketMissingReason = "BACKUP_AGENT_SOCKET_MISSING" + BackupAgentUnavailableReason = "BACKUP_AGENT_UNAVAILABLE" +) + +var ( + ErrBackupAgentSocketMissing = infraerrors.ServiceUnavailable( + BackupAgentSocketMissingReason, + "backup agent socket is missing", + ) + ErrBackupAgentUnavailable = infraerrors.ServiceUnavailable( + BackupAgentUnavailableReason, + "backup agent is unavailable", + ) +) + +type DataManagementAgentHealth struct { + Enabled bool + Reason string + SocketPath string + Agent *DataManagementAgentInfo +} + +type DataManagementAgentInfo struct { + Status string + Version 
string + UptimeSeconds int64 +} + +type DataManagementService struct { + socketPath string + dialTimeout time.Duration +} + +func NewDataManagementService() *DataManagementService { + return NewDataManagementServiceWithOptions(DefaultBackupAgentSocketPath, 500*time.Millisecond) +} + +func NewDataManagementServiceWithOptions(socketPath string, dialTimeout time.Duration) *DataManagementService { + path := strings.TrimSpace(socketPath) + if path == "" { + path = DefaultBackupAgentSocketPath + } + if dialTimeout <= 0 { + dialTimeout = 500 * time.Millisecond + } + return &DataManagementService{ + socketPath: path, + dialTimeout: dialTimeout, + } +} + +func (s *DataManagementService) SocketPath() string { + if s == nil || strings.TrimSpace(s.socketPath) == "" { + return DefaultBackupAgentSocketPath + } + return s.socketPath +} + +func (s *DataManagementService) GetAgentHealth(ctx context.Context) DataManagementAgentHealth { + socketPath := s.SocketPath() + health := DataManagementAgentHealth{ + Enabled: false, + Reason: BackupAgentUnavailableReason, + SocketPath: socketPath, + } + + info, err := os.Stat(socketPath) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + health.Reason = BackupAgentSocketMissingReason + } + return health + } + if info.Mode()&os.ModeSocket == 0 { + return health + } + + dialer := net.Dialer{Timeout: s.dialTimeout} + conn, err := dialer.DialContext(ctx, "unix", socketPath) + if err != nil { + return health + } + _ = conn.Close() + + agent, err := s.probeBackupHealth(ctx) + if err != nil { + return health + } + + health.Enabled = true + health.Reason = "" + health.Agent = agent + return health +} + +func (s *DataManagementService) EnsureAgentEnabled(ctx context.Context) error { + health := s.GetAgentHealth(ctx) + if health.Enabled { + return nil + } + + metadata := map[string]string{"socket_path": health.SocketPath} + if health.Reason == BackupAgentSocketMissingReason { + return ErrBackupAgentSocketMissing.WithMetadata(metadata) + } + 
return ErrBackupAgentUnavailable.WithMetadata(metadata) +} diff --git a/backend/internal/service/data_management_service_test.go b/backend/internal/service/data_management_service_test.go new file mode 100644 index 000000000..08574fe56 --- /dev/null +++ b/backend/internal/service/data_management_service_test.go @@ -0,0 +1,91 @@ +package service + +import ( + "context" + "fmt" + "net" + "os" + "path/filepath" + "testing" + "time" + + backupv1 "github.com/Wei-Shaw/sub2api/internal/backup/proto/backup/v1" + infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + "github.com/stretchr/testify/require" + "google.golang.org/grpc" +) + +func TestDataManagementService_GetAgentHealth_SocketMissing(t *testing.T) { + t.Parallel() + + svc := NewDataManagementServiceWithOptions(filepath.Join(t.TempDir(), "missing.sock"), 100*time.Millisecond) + health := svc.GetAgentHealth(context.Background()) + + require.False(t, health.Enabled) + require.Equal(t, BackupAgentSocketMissingReason, health.Reason) + require.NotEmpty(t, health.SocketPath) +} + +func TestDataManagementService_GetAgentHealth_SocketReachable(t *testing.T) { + t.Parallel() + + socketPath := filepath.Join(os.TempDir(), fmt.Sprintf("sub2api-dm-%d.sock", time.Now().UnixNano())) + startTestBackupHealthServer(t, socketPath) + + svc := NewDataManagementServiceWithOptions(socketPath, 100*time.Millisecond) + health := svc.GetAgentHealth(context.Background()) + + require.True(t, health.Enabled) + require.Equal(t, "", health.Reason) + require.Equal(t, socketPath, health.SocketPath) + require.NotNil(t, health.Agent) + require.Equal(t, "SERVING", health.Agent.Status) + require.Equal(t, "test-backupd", health.Agent.Version) + require.EqualValues(t, 42, health.Agent.UptimeSeconds) +} + +func TestDataManagementService_EnsureAgentEnabled(t *testing.T) { + t.Parallel() + + svc := NewDataManagementServiceWithOptions(filepath.Join(t.TempDir(), "missing.sock"), 100*time.Millisecond) + err := 
svc.EnsureAgentEnabled(context.Background()) + require.Error(t, err) + + statusCode, status := infraerrors.ToHTTP(err) + require.Equal(t, 503, statusCode) + require.Equal(t, BackupAgentSocketMissingReason, status.Reason) + require.Equal(t, svc.SocketPath(), status.Metadata["socket_path"]) +} + +func startTestBackupHealthServer(t *testing.T, socketPath string) { + t.Helper() + _ = os.Remove(socketPath) + + listener, err := net.Listen("unix", socketPath) + require.NoError(t, err) + + server := grpc.NewServer() + backupv1.RegisterBackupServiceServer(server, &testBackupHealthServer{}) + + go func() { + _ = server.Serve(listener) + }() + + t.Cleanup(func() { + server.Stop() + _ = listener.Close() + _ = os.Remove(socketPath) + }) +} + +type testBackupHealthServer struct { + backupv1.UnimplementedBackupServiceServer +} + +func (s *testBackupHealthServer) Health(context.Context, *backupv1.HealthRequest) (*backupv1.HealthResponse, error) { + return &backupv1.HealthResponse{ + Status: "SERVING", + Version: "test-backupd", + UptimeSeconds: 42, + }, nil +} diff --git a/backend/internal/service/wire.go b/backend/internal/service/wire.go index bd241566e..adc6777d1 100644 --- a/backend/internal/service/wire.go +++ b/backend/internal/service/wire.go @@ -327,6 +327,7 @@ var ProviderSet = wire.NewSet( NewAccountUsageService, NewAccountTestService, NewSettingService, + NewDataManagementService, ProvideOpsSystemLogSink, NewOpsService, ProvideOpsMetricsCollector, diff --git a/backup/README.md b/backup/README.md new file mode 100644 index 000000000..4c9845239 --- /dev/null +++ b/backup/README.md @@ -0,0 +1,49 @@ +# backupd 备份进程 + +`backupd` 是运行在宿主机上的备份执行进程,负责: + +- 接收主进程通过 gRPC Unix Socket 发起的备份管理请求 +- 执行 PostgreSQL / Redis / Full 备份任务 +- 将备份产物可选上传到标准 S3(`aws-sdk-go-v2`) +- 使用 Ent + SQLite 持久化备份配置与任务状态 + +## 1. 本地构建 + +```bash +cd backup +go build -o backupd ./cmd/backupd +``` + +## 2. 
本地运行 + +```bash +cd backup +./backupd \ + -socket-path /tmp/sub2api-backup.sock \ + -sqlite-path /tmp/sub2api-backupd.db \ + -version dev +``` + +默认参数: + +- `-socket-path`: `/tmp/sub2api-backup.sock` +- `-sqlite-path`: `/tmp/sub2api-backupd.db` +- `-version`: `dev` + +## 3. 依赖要求 + +- PostgreSQL 客户端:`pg_dump` +- Redis 客户端:`redis-cli` +- 若使用 `docker_exec` 源模式:`docker` + +## 4. 与主进程协作要求 + +- 主进程固定探测 `/tmp/sub2api-backup.sock` +- 只有探测到该 UDS 且 `Health` 成功时,管理后台“数据管理”功能才会启用 +- `backupd` 本身不做业务鉴权,依赖主进程管理员鉴权 + UDS 文件权限 + +## 5. 生产建议 + +- 使用 `systemd` 托管进程(参考 `deploy/sub2api-backupd.service`) +- 建议 `backupd` 与 `sub2api` 在同一宿主机运行 +- 若 `sub2api` 在 Docker 容器内,需把宿主机 `/tmp/sub2api-backup.sock` 挂载到容器内同路径 diff --git a/backup/cmd/backupd/main.go b/backup/cmd/backupd/main.go new file mode 100644 index 000000000..8597b90c9 --- /dev/null +++ b/backup/cmd/backupd/main.go @@ -0,0 +1,114 @@ +package main + +import ( + "context" + "errors" + "flag" + "log" + "net" + "os" + "os/signal" + "strings" + "syscall" + "time" + + "github.com/Wei-Shaw/sub2api/backup/internal/executor" + "github.com/Wei-Shaw/sub2api/backup/internal/grpcserver" + "github.com/Wei-Shaw/sub2api/backup/internal/store/entstore" + backupv1 "github.com/Wei-Shaw/sub2api/backup/proto/backup/v1" + "google.golang.org/grpc" + "google.golang.org/grpc/health" + healthpb "google.golang.org/grpc/health/grpc_health_v1" +) + +func main() { + socketPath := flag.String("socket-path", "/tmp/sub2api-backup.sock", "backupd unix socket path") + sqlitePath := flag.String("sqlite-path", "/tmp/sub2api-backupd.db", "backupd sqlite database path") + version := flag.String("version", "dev", "backupd version") + flag.Parse() + + if err := run(strings.TrimSpace(*socketPath), strings.TrimSpace(*sqlitePath), strings.TrimSpace(*version)); err != nil { + log.Fatalf("backupd start failed: %v", err) + } +} + +func run(socketPath, sqlitePath, version string) error { + if socketPath == "" { + socketPath = "/tmp/sub2api-backup.sock" + } + if sqlitePath == 
"" { + sqlitePath = "/tmp/sub2api-backupd.db" + } + + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + store, err := entstore.Open(ctx, sqlitePath) + if err != nil { + return err + } + defer func() { + _ = store.Close() + }() + + runner := executor.NewRunner(store, executor.Options{Logger: log.Default()}) + if err := runner.Start(); err != nil { + return err + } + defer func() { + stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + _ = runner.Stop(stopCtx) + }() + + if err := os.Remove(socketPath); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + + listener, err := net.Listen("unix", socketPath) + if err != nil { + return err + } + defer func() { + _ = listener.Close() + _ = os.Remove(socketPath) + }() + + if err := os.Chmod(socketPath, 0o660); err != nil { + return err + } + + grpcServer := grpc.NewServer(grpc.UnaryInterceptor(grpcserver.UnaryServerInterceptor(log.Default()))) + healthServer := health.NewServer() + healthpb.RegisterHealthServer(grpcServer, healthServer) + healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) + healthServer.SetServingStatus("backup.v1.BackupService", healthpb.HealthCheckResponse_SERVING) + backupv1.RegisterBackupServiceServer(grpcServer, grpcserver.New(store, version, runner)) + + errCh := make(chan error, 1) + go func() { + log.Printf("backupd listening on %s", socketPath) + errCh <- grpcServer.Serve(listener) + }() + + sigCh := make(chan os.Signal, 1) + signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) + + select { + case sig := <-sigCh: + log.Printf("backupd shutting down, signal=%s", sig.String()) + stopped := make(chan struct{}) + go func() { + grpcServer.GracefulStop() + close(stopped) + }() + select { + case <-stopped: + return nil + case <-time.After(5 * time.Second): + grpcServer.Stop() + return nil + } + case err := <-errCh: + return err + } +} diff --git a/backup/ent/backupjob.go 
b/backup/ent/backupjob.go new file mode 100644 index 000000000..4079d4153 --- /dev/null +++ b/backup/ent/backupjob.go @@ -0,0 +1,319 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" +) + +// BackupJob is the model entity for the BackupJob schema. +type BackupJob struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // JobID holds the value of the "job_id" field. + JobID string `json:"job_id,omitempty"` + // BackupType holds the value of the "backup_type" field. + BackupType backupjob.BackupType `json:"backup_type,omitempty"` + // Status holds the value of the "status" field. + Status backupjob.Status `json:"status,omitempty"` + // TriggeredBy holds the value of the "triggered_by" field. + TriggeredBy string `json:"triggered_by,omitempty"` + // IdempotencyKey holds the value of the "idempotency_key" field. + IdempotencyKey string `json:"idempotency_key,omitempty"` + // UploadToS3 holds the value of the "upload_to_s3" field. + UploadToS3 bool `json:"upload_to_s3,omitempty"` + // StartedAt holds the value of the "started_at" field. + StartedAt *time.Time `json:"started_at,omitempty"` + // FinishedAt holds the value of the "finished_at" field. + FinishedAt *time.Time `json:"finished_at,omitempty"` + // ErrorMessage holds the value of the "error_message" field. + ErrorMessage string `json:"error_message,omitempty"` + // ArtifactLocalPath holds the value of the "artifact_local_path" field. + ArtifactLocalPath string `json:"artifact_local_path,omitempty"` + // ArtifactSizeBytes holds the value of the "artifact_size_bytes" field. + ArtifactSizeBytes *int64 `json:"artifact_size_bytes,omitempty"` + // ArtifactSha256 holds the value of the "artifact_sha256" field. + ArtifactSha256 string `json:"artifact_sha256,omitempty"` + // S3Bucket holds the value of the "s3_bucket" field. 
+ S3Bucket string `json:"s3_bucket,omitempty"` + // S3Key holds the value of the "s3_key" field. + S3Key string `json:"s3_key,omitempty"` + // S3Etag holds the value of the "s3_etag" field. + S3Etag string `json:"s3_etag,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the BackupJobQuery when eager-loading is set. + Edges BackupJobEdges `json:"edges"` + selectValues sql.SelectValues +} + +// BackupJobEdges holds the relations/edges for other nodes in the graph. +type BackupJobEdges struct { + // Events holds the value of the events edge. + Events []*BackupJobEvent `json:"events,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// EventsOrErr returns the Events value or an error if the edge +// was not loaded in eager-loading. +func (e BackupJobEdges) EventsOrErr() ([]*BackupJobEvent, error) { + if e.loadedTypes[0] { + return e.Events, nil + } + return nil, &NotLoadedError{edge: "events"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*BackupJob) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case backupjob.FieldUploadToS3: + values[i] = new(sql.NullBool) + case backupjob.FieldID, backupjob.FieldArtifactSizeBytes: + values[i] = new(sql.NullInt64) + case backupjob.FieldJobID, backupjob.FieldBackupType, backupjob.FieldStatus, backupjob.FieldTriggeredBy, backupjob.FieldIdempotencyKey, backupjob.FieldErrorMessage, backupjob.FieldArtifactLocalPath, backupjob.FieldArtifactSha256, backupjob.FieldS3Bucket, backupjob.FieldS3Key, backupjob.FieldS3Etag: + values[i] = new(sql.NullString) + case backupjob.FieldStartedAt, backupjob.FieldFinishedAt, backupjob.FieldCreatedAt, backupjob.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the BackupJob fields. +func (_m *BackupJob) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case backupjob.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int(value.Int64) + case backupjob.FieldJobID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field job_id", values[i]) + } else if value.Valid { + _m.JobID = value.String + } + case backupjob.FieldBackupType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field backup_type", values[i]) + } else if value.Valid { + _m.BackupType = backupjob.BackupType(value.String) + } + case backupjob.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", 
values[i]) + } else if value.Valid { + _m.Status = backupjob.Status(value.String) + } + case backupjob.FieldTriggeredBy: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field triggered_by", values[i]) + } else if value.Valid { + _m.TriggeredBy = value.String + } + case backupjob.FieldIdempotencyKey: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field idempotency_key", values[i]) + } else if value.Valid { + _m.IdempotencyKey = value.String + } + case backupjob.FieldUploadToS3: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field upload_to_s3", values[i]) + } else if value.Valid { + _m.UploadToS3 = value.Bool + } + case backupjob.FieldStartedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field started_at", values[i]) + } else if value.Valid { + _m.StartedAt = new(time.Time) + *_m.StartedAt = value.Time + } + case backupjob.FieldFinishedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field finished_at", values[i]) + } else if value.Valid { + _m.FinishedAt = new(time.Time) + *_m.FinishedAt = value.Time + } + case backupjob.FieldErrorMessage: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field error_message", values[i]) + } else if value.Valid { + _m.ErrorMessage = value.String + } + case backupjob.FieldArtifactLocalPath: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field artifact_local_path", values[i]) + } else if value.Valid { + _m.ArtifactLocalPath = value.String + } + case backupjob.FieldArtifactSizeBytes: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field artifact_size_bytes", values[i]) + } else if value.Valid { + _m.ArtifactSizeBytes = new(int64) + 
*_m.ArtifactSizeBytes = value.Int64 + } + case backupjob.FieldArtifactSha256: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field artifact_sha256", values[i]) + } else if value.Valid { + _m.ArtifactSha256 = value.String + } + case backupjob.FieldS3Bucket: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field s3_bucket", values[i]) + } else if value.Valid { + _m.S3Bucket = value.String + } + case backupjob.FieldS3Key: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field s3_key", values[i]) + } else if value.Valid { + _m.S3Key = value.String + } + case backupjob.FieldS3Etag: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field s3_etag", values[i]) + } else if value.Valid { + _m.S3Etag = value.String + } + case backupjob.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case backupjob.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the BackupJob. +// This includes values selected through modifiers, order, etc. +func (_m *BackupJob) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryEvents queries the "events" edge of the BackupJob entity. +func (_m *BackupJob) QueryEvents() *BackupJobEventQuery { + return NewBackupJobClient(_m.config).QueryEvents(_m) +} + +// Update returns a builder for updating this BackupJob. 
+// Note that you need to call BackupJob.Unwrap() before calling this method if this BackupJob +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *BackupJob) Update() *BackupJobUpdateOne { + return NewBackupJobClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the BackupJob entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *BackupJob) Unwrap() *BackupJob { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: BackupJob is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *BackupJob) String() string { + var builder strings.Builder + builder.WriteString("BackupJob(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("job_id=") + builder.WriteString(_m.JobID) + builder.WriteString(", ") + builder.WriteString("backup_type=") + builder.WriteString(fmt.Sprintf("%v", _m.BackupType)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(fmt.Sprintf("%v", _m.Status)) + builder.WriteString(", ") + builder.WriteString("triggered_by=") + builder.WriteString(_m.TriggeredBy) + builder.WriteString(", ") + builder.WriteString("idempotency_key=") + builder.WriteString(_m.IdempotencyKey) + builder.WriteString(", ") + builder.WriteString("upload_to_s3=") + builder.WriteString(fmt.Sprintf("%v", _m.UploadToS3)) + builder.WriteString(", ") + if v := _m.StartedAt; v != nil { + builder.WriteString("started_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.FinishedAt; v != nil { + builder.WriteString("finished_at=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + builder.WriteString("error_message=") + builder.WriteString(_m.ErrorMessage) + builder.WriteString(", ") + 
builder.WriteString("artifact_local_path=") + builder.WriteString(_m.ArtifactLocalPath) + builder.WriteString(", ") + if v := _m.ArtifactSizeBytes; v != nil { + builder.WriteString("artifact_size_bytes=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("artifact_sha256=") + builder.WriteString(_m.ArtifactSha256) + builder.WriteString(", ") + builder.WriteString("s3_bucket=") + builder.WriteString(_m.S3Bucket) + builder.WriteString(", ") + builder.WriteString("s3_key=") + builder.WriteString(_m.S3Key) + builder.WriteString(", ") + builder.WriteString("s3_etag=") + builder.WriteString(_m.S3Etag) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// BackupJobs is a parsable slice of BackupJob. +type BackupJobs []*BackupJob diff --git a/backup/ent/backupjob/backupjob.go b/backup/ent/backupjob/backupjob.go new file mode 100644 index 000000000..2411e00b2 --- /dev/null +++ b/backup/ent/backupjob/backupjob.go @@ -0,0 +1,275 @@ +// Code generated by ent, DO NOT EDIT. + +package backupjob + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the backupjob type in the database. + Label = "backup_job" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldJobID holds the string denoting the job_id field in the database. + FieldJobID = "job_id" + // FieldBackupType holds the string denoting the backup_type field in the database. + FieldBackupType = "backup_type" + // FieldStatus holds the string denoting the status field in the database. 
+ FieldStatus = "status" + // FieldTriggeredBy holds the string denoting the triggered_by field in the database. + FieldTriggeredBy = "triggered_by" + // FieldIdempotencyKey holds the string denoting the idempotency_key field in the database. + FieldIdempotencyKey = "idempotency_key" + // FieldUploadToS3 holds the string denoting the upload_to_s3 field in the database. + FieldUploadToS3 = "upload_to_s3" + // FieldStartedAt holds the string denoting the started_at field in the database. + FieldStartedAt = "started_at" + // FieldFinishedAt holds the string denoting the finished_at field in the database. + FieldFinishedAt = "finished_at" + // FieldErrorMessage holds the string denoting the error_message field in the database. + FieldErrorMessage = "error_message" + // FieldArtifactLocalPath holds the string denoting the artifact_local_path field in the database. + FieldArtifactLocalPath = "artifact_local_path" + // FieldArtifactSizeBytes holds the string denoting the artifact_size_bytes field in the database. + FieldArtifactSizeBytes = "artifact_size_bytes" + // FieldArtifactSha256 holds the string denoting the artifact_sha256 field in the database. + FieldArtifactSha256 = "artifact_sha256" + // FieldS3Bucket holds the string denoting the s3_bucket field in the database. + FieldS3Bucket = "s3_bucket" + // FieldS3Key holds the string denoting the s3_key field in the database. + FieldS3Key = "s3_key" + // FieldS3Etag holds the string denoting the s3_etag field in the database. + FieldS3Etag = "s3_etag" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // EdgeEvents holds the string denoting the events edge name in mutations. + EdgeEvents = "events" + // Table holds the table name of the backupjob in the database. 
+ Table = "backup_jobs" + // EventsTable is the table that holds the events relation/edge. + EventsTable = "backup_job_events" + // EventsInverseTable is the table name for the BackupJobEvent entity. + // It exists in this package in order to avoid circular dependency with the "backupjobevent" package. + EventsInverseTable = "backup_job_events" + // EventsColumn is the table column denoting the events relation/edge. + EventsColumn = "backup_job_id" +) + +// Columns holds all SQL columns for backupjob fields. +var Columns = []string{ + FieldID, + FieldJobID, + FieldBackupType, + FieldStatus, + FieldTriggeredBy, + FieldIdempotencyKey, + FieldUploadToS3, + FieldStartedAt, + FieldFinishedAt, + FieldErrorMessage, + FieldArtifactLocalPath, + FieldArtifactSizeBytes, + FieldArtifactSha256, + FieldS3Bucket, + FieldS3Key, + FieldS3Etag, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultTriggeredBy holds the default value on creation for the "triggered_by" field. + DefaultTriggeredBy string + // DefaultUploadToS3 holds the default value on creation for the "upload_to_s3" field. + DefaultUploadToS3 bool + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// BackupType defines the type for the "backup_type" enum field. +type BackupType string + +// BackupType values. 
+const ( + BackupTypePostgres BackupType = "postgres" + BackupTypeRedis BackupType = "redis" + BackupTypeFull BackupType = "full" +) + +func (bt BackupType) String() string { + return string(bt) +} + +// BackupTypeValidator is a validator for the "backup_type" field enum values. It is called by the builders before save. +func BackupTypeValidator(bt BackupType) error { + switch bt { + case BackupTypePostgres, BackupTypeRedis, BackupTypeFull: + return nil + default: + return fmt.Errorf("backupjob: invalid enum value for backup_type field: %q", bt) + } +} + +// Status defines the type for the "status" enum field. +type Status string + +// StatusQueued is the default value of the Status enum. +const DefaultStatus = StatusQueued + +// Status values. +const ( + StatusQueued Status = "queued" + StatusRunning Status = "running" + StatusSucceeded Status = "succeeded" + StatusFailed Status = "failed" + StatusPartialSucceeded Status = "partial_succeeded" +) + +func (s Status) String() string { + return string(s) +} + +// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save. +func StatusValidator(s Status) error { + switch s { + case StatusQueued, StatusRunning, StatusSucceeded, StatusFailed, StatusPartialSucceeded: + return nil + default: + return fmt.Errorf("backupjob: invalid enum value for status field: %q", s) + } +} + +// OrderOption defines the ordering options for the BackupJob queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByJobID orders the results by the job_id field. +func ByJobID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldJobID, opts...).ToFunc() +} + +// ByBackupType orders the results by the backup_type field. 
+func ByBackupType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBackupType, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByTriggeredBy orders the results by the triggered_by field. +func ByTriggeredBy(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTriggeredBy, opts...).ToFunc() +} + +// ByIdempotencyKey orders the results by the idempotency_key field. +func ByIdempotencyKey(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIdempotencyKey, opts...).ToFunc() +} + +// ByUploadToS3 orders the results by the upload_to_s3 field. +func ByUploadToS3(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUploadToS3, opts...).ToFunc() +} + +// ByStartedAt orders the results by the started_at field. +func ByStartedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStartedAt, opts...).ToFunc() +} + +// ByFinishedAt orders the results by the finished_at field. +func ByFinishedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFinishedAt, opts...).ToFunc() +} + +// ByErrorMessage orders the results by the error_message field. +func ByErrorMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldErrorMessage, opts...).ToFunc() +} + +// ByArtifactLocalPath orders the results by the artifact_local_path field. +func ByArtifactLocalPath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldArtifactLocalPath, opts...).ToFunc() +} + +// ByArtifactSizeBytes orders the results by the artifact_size_bytes field. +func ByArtifactSizeBytes(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldArtifactSizeBytes, opts...).ToFunc() +} + +// ByArtifactSha256 orders the results by the artifact_sha256 field. 
+func ByArtifactSha256(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldArtifactSha256, opts...).ToFunc() +} + +// ByS3Bucket orders the results by the s3_bucket field. +func ByS3Bucket(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldS3Bucket, opts...).ToFunc() +} + +// ByS3Key orders the results by the s3_key field. +func ByS3Key(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldS3Key, opts...).ToFunc() +} + +// ByS3Etag orders the results by the s3_etag field. +func ByS3Etag(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldS3Etag, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByEventsCount orders the results by events count. +func ByEventsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newEventsStep(), opts...) + } +} + +// ByEvents orders the results by events terms. +func ByEvents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newEventsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newEventsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(EventsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, EventsTable, EventsColumn), + ) +} diff --git a/backup/ent/backupjob/where.go b/backup/ent/backupjob/where.go new file mode 100644 index 000000000..ef02344f3 --- /dev/null +++ b/backup/ent/backupjob/where.go @@ -0,0 +1,1104 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package backupjob + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldID, id)) +} + +// JobID applies equality check predicate on the "job_id" field. It's identical to JobIDEQ. +func JobID(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldJobID, v)) +} + +// TriggeredBy applies equality check predicate on the "triggered_by" field. It's identical to TriggeredByEQ. 
+func TriggeredBy(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldTriggeredBy, v)) +} + +// IdempotencyKey applies equality check predicate on the "idempotency_key" field. It's identical to IdempotencyKeyEQ. +func IdempotencyKey(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldIdempotencyKey, v)) +} + +// UploadToS3 applies equality check predicate on the "upload_to_s3" field. It's identical to UploadToS3EQ. +func UploadToS3(v bool) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldUploadToS3, v)) +} + +// StartedAt applies equality check predicate on the "started_at" field. It's identical to StartedAtEQ. +func StartedAt(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldStartedAt, v)) +} + +// FinishedAt applies equality check predicate on the "finished_at" field. It's identical to FinishedAtEQ. +func FinishedAt(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldFinishedAt, v)) +} + +// ErrorMessage applies equality check predicate on the "error_message" field. It's identical to ErrorMessageEQ. +func ErrorMessage(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldErrorMessage, v)) +} + +// ArtifactLocalPath applies equality check predicate on the "artifact_local_path" field. It's identical to ArtifactLocalPathEQ. +func ArtifactLocalPath(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldArtifactLocalPath, v)) +} + +// ArtifactSizeBytes applies equality check predicate on the "artifact_size_bytes" field. It's identical to ArtifactSizeBytesEQ. +func ArtifactSizeBytes(v int64) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldArtifactSizeBytes, v)) +} + +// ArtifactSha256 applies equality check predicate on the "artifact_sha256" field. It's identical to ArtifactSha256EQ. 
+func ArtifactSha256(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldArtifactSha256, v)) +} + +// S3Bucket applies equality check predicate on the "s3_bucket" field. It's identical to S3BucketEQ. +func S3Bucket(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldS3Bucket, v)) +} + +// S3Key applies equality check predicate on the "s3_key" field. It's identical to S3KeyEQ. +func S3Key(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldS3Key, v)) +} + +// S3Etag applies equality check predicate on the "s3_etag" field. It's identical to S3EtagEQ. +func S3Etag(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldS3Etag, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// JobIDEQ applies the EQ predicate on the "job_id" field. +func JobIDEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldJobID, v)) +} + +// JobIDNEQ applies the NEQ predicate on the "job_id" field. +func JobIDNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldJobID, v)) +} + +// JobIDIn applies the In predicate on the "job_id" field. +func JobIDIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldJobID, vs...)) +} + +// JobIDNotIn applies the NotIn predicate on the "job_id" field. +func JobIDNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldJobID, vs...)) +} + +// JobIDGT applies the GT predicate on the "job_id" field. 
+func JobIDGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldJobID, v)) +} + +// JobIDGTE applies the GTE predicate on the "job_id" field. +func JobIDGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldJobID, v)) +} + +// JobIDLT applies the LT predicate on the "job_id" field. +func JobIDLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldJobID, v)) +} + +// JobIDLTE applies the LTE predicate on the "job_id" field. +func JobIDLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldJobID, v)) +} + +// JobIDContains applies the Contains predicate on the "job_id" field. +func JobIDContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldJobID, v)) +} + +// JobIDHasPrefix applies the HasPrefix predicate on the "job_id" field. +func JobIDHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldJobID, v)) +} + +// JobIDHasSuffix applies the HasSuffix predicate on the "job_id" field. +func JobIDHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldJobID, v)) +} + +// JobIDEqualFold applies the EqualFold predicate on the "job_id" field. +func JobIDEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldJobID, v)) +} + +// JobIDContainsFold applies the ContainsFold predicate on the "job_id" field. +func JobIDContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldJobID, v)) +} + +// BackupTypeEQ applies the EQ predicate on the "backup_type" field. +func BackupTypeEQ(v BackupType) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldBackupType, v)) +} + +// BackupTypeNEQ applies the NEQ predicate on the "backup_type" field. 
+func BackupTypeNEQ(v BackupType) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldBackupType, v)) +} + +// BackupTypeIn applies the In predicate on the "backup_type" field. +func BackupTypeIn(vs ...BackupType) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldBackupType, vs...)) +} + +// BackupTypeNotIn applies the NotIn predicate on the "backup_type" field. +func BackupTypeNotIn(vs ...BackupType) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldBackupType, vs...)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v Status) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v Status) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...Status) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...Status) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldStatus, vs...)) +} + +// TriggeredByEQ applies the EQ predicate on the "triggered_by" field. +func TriggeredByEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldTriggeredBy, v)) +} + +// TriggeredByNEQ applies the NEQ predicate on the "triggered_by" field. +func TriggeredByNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldTriggeredBy, v)) +} + +// TriggeredByIn applies the In predicate on the "triggered_by" field. +func TriggeredByIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldTriggeredBy, vs...)) +} + +// TriggeredByNotIn applies the NotIn predicate on the "triggered_by" field. 
+func TriggeredByNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldTriggeredBy, vs...)) +} + +// TriggeredByGT applies the GT predicate on the "triggered_by" field. +func TriggeredByGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldTriggeredBy, v)) +} + +// TriggeredByGTE applies the GTE predicate on the "triggered_by" field. +func TriggeredByGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldTriggeredBy, v)) +} + +// TriggeredByLT applies the LT predicate on the "triggered_by" field. +func TriggeredByLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldTriggeredBy, v)) +} + +// TriggeredByLTE applies the LTE predicate on the "triggered_by" field. +func TriggeredByLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldTriggeredBy, v)) +} + +// TriggeredByContains applies the Contains predicate on the "triggered_by" field. +func TriggeredByContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldTriggeredBy, v)) +} + +// TriggeredByHasPrefix applies the HasPrefix predicate on the "triggered_by" field. +func TriggeredByHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldTriggeredBy, v)) +} + +// TriggeredByHasSuffix applies the HasSuffix predicate on the "triggered_by" field. +func TriggeredByHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldTriggeredBy, v)) +} + +// TriggeredByEqualFold applies the EqualFold predicate on the "triggered_by" field. +func TriggeredByEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldTriggeredBy, v)) +} + +// TriggeredByContainsFold applies the ContainsFold predicate on the "triggered_by" field. 
+func TriggeredByContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldTriggeredBy, v)) +} + +// IdempotencyKeyEQ applies the EQ predicate on the "idempotency_key" field. +func IdempotencyKeyEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldIdempotencyKey, v)) +} + +// IdempotencyKeyNEQ applies the NEQ predicate on the "idempotency_key" field. +func IdempotencyKeyNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldIdempotencyKey, v)) +} + +// IdempotencyKeyIn applies the In predicate on the "idempotency_key" field. +func IdempotencyKeyIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldIdempotencyKey, vs...)) +} + +// IdempotencyKeyNotIn applies the NotIn predicate on the "idempotency_key" field. +func IdempotencyKeyNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldIdempotencyKey, vs...)) +} + +// IdempotencyKeyGT applies the GT predicate on the "idempotency_key" field. +func IdempotencyKeyGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldIdempotencyKey, v)) +} + +// IdempotencyKeyGTE applies the GTE predicate on the "idempotency_key" field. +func IdempotencyKeyGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldIdempotencyKey, v)) +} + +// IdempotencyKeyLT applies the LT predicate on the "idempotency_key" field. +func IdempotencyKeyLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldIdempotencyKey, v)) +} + +// IdempotencyKeyLTE applies the LTE predicate on the "idempotency_key" field. +func IdempotencyKeyLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldIdempotencyKey, v)) +} + +// IdempotencyKeyContains applies the Contains predicate on the "idempotency_key" field. 
+func IdempotencyKeyContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldIdempotencyKey, v)) +} + +// IdempotencyKeyHasPrefix applies the HasPrefix predicate on the "idempotency_key" field. +func IdempotencyKeyHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldIdempotencyKey, v)) +} + +// IdempotencyKeyHasSuffix applies the HasSuffix predicate on the "idempotency_key" field. +func IdempotencyKeyHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldIdempotencyKey, v)) +} + +// IdempotencyKeyIsNil applies the IsNil predicate on the "idempotency_key" field. +func IdempotencyKeyIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldIdempotencyKey)) +} + +// IdempotencyKeyNotNil applies the NotNil predicate on the "idempotency_key" field. +func IdempotencyKeyNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldIdempotencyKey)) +} + +// IdempotencyKeyEqualFold applies the EqualFold predicate on the "idempotency_key" field. +func IdempotencyKeyEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldIdempotencyKey, v)) +} + +// IdempotencyKeyContainsFold applies the ContainsFold predicate on the "idempotency_key" field. +func IdempotencyKeyContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldIdempotencyKey, v)) +} + +// UploadToS3EQ applies the EQ predicate on the "upload_to_s3" field. +func UploadToS3EQ(v bool) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldUploadToS3, v)) +} + +// UploadToS3NEQ applies the NEQ predicate on the "upload_to_s3" field. +func UploadToS3NEQ(v bool) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldUploadToS3, v)) +} + +// StartedAtEQ applies the EQ predicate on the "started_at" field. 
+func StartedAtEQ(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldStartedAt, v)) +} + +// StartedAtNEQ applies the NEQ predicate on the "started_at" field. +func StartedAtNEQ(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldStartedAt, v)) +} + +// StartedAtIn applies the In predicate on the "started_at" field. +func StartedAtIn(vs ...time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldStartedAt, vs...)) +} + +// StartedAtNotIn applies the NotIn predicate on the "started_at" field. +func StartedAtNotIn(vs ...time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldStartedAt, vs...)) +} + +// StartedAtGT applies the GT predicate on the "started_at" field. +func StartedAtGT(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldStartedAt, v)) +} + +// StartedAtGTE applies the GTE predicate on the "started_at" field. +func StartedAtGTE(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldStartedAt, v)) +} + +// StartedAtLT applies the LT predicate on the "started_at" field. +func StartedAtLT(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldStartedAt, v)) +} + +// StartedAtLTE applies the LTE predicate on the "started_at" field. +func StartedAtLTE(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldStartedAt, v)) +} + +// StartedAtIsNil applies the IsNil predicate on the "started_at" field. +func StartedAtIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldStartedAt)) +} + +// StartedAtNotNil applies the NotNil predicate on the "started_at" field. +func StartedAtNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldStartedAt)) +} + +// FinishedAtEQ applies the EQ predicate on the "finished_at" field. 
+func FinishedAtEQ(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldFinishedAt, v)) +} + +// FinishedAtNEQ applies the NEQ predicate on the "finished_at" field. +func FinishedAtNEQ(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldFinishedAt, v)) +} + +// FinishedAtIn applies the In predicate on the "finished_at" field. +func FinishedAtIn(vs ...time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldFinishedAt, vs...)) +} + +// FinishedAtNotIn applies the NotIn predicate on the "finished_at" field. +func FinishedAtNotIn(vs ...time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldFinishedAt, vs...)) +} + +// FinishedAtGT applies the GT predicate on the "finished_at" field. +func FinishedAtGT(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldFinishedAt, v)) +} + +// FinishedAtGTE applies the GTE predicate on the "finished_at" field. +func FinishedAtGTE(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldFinishedAt, v)) +} + +// FinishedAtLT applies the LT predicate on the "finished_at" field. +func FinishedAtLT(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldFinishedAt, v)) +} + +// FinishedAtLTE applies the LTE predicate on the "finished_at" field. +func FinishedAtLTE(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldFinishedAt, v)) +} + +// FinishedAtIsNil applies the IsNil predicate on the "finished_at" field. +func FinishedAtIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldFinishedAt)) +} + +// FinishedAtNotNil applies the NotNil predicate on the "finished_at" field. +func FinishedAtNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldFinishedAt)) +} + +// ErrorMessageEQ applies the EQ predicate on the "error_message" field. 
+func ErrorMessageEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldErrorMessage, v)) +} + +// ErrorMessageNEQ applies the NEQ predicate on the "error_message" field. +func ErrorMessageNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldErrorMessage, v)) +} + +// ErrorMessageIn applies the In predicate on the "error_message" field. +func ErrorMessageIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldErrorMessage, vs...)) +} + +// ErrorMessageNotIn applies the NotIn predicate on the "error_message" field. +func ErrorMessageNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldErrorMessage, vs...)) +} + +// ErrorMessageGT applies the GT predicate on the "error_message" field. +func ErrorMessageGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldErrorMessage, v)) +} + +// ErrorMessageGTE applies the GTE predicate on the "error_message" field. +func ErrorMessageGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldErrorMessage, v)) +} + +// ErrorMessageLT applies the LT predicate on the "error_message" field. +func ErrorMessageLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldErrorMessage, v)) +} + +// ErrorMessageLTE applies the LTE predicate on the "error_message" field. +func ErrorMessageLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldErrorMessage, v)) +} + +// ErrorMessageContains applies the Contains predicate on the "error_message" field. +func ErrorMessageContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldErrorMessage, v)) +} + +// ErrorMessageHasPrefix applies the HasPrefix predicate on the "error_message" field. 
+func ErrorMessageHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldErrorMessage, v)) +} + +// ErrorMessageHasSuffix applies the HasSuffix predicate on the "error_message" field. +func ErrorMessageHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldErrorMessage, v)) +} + +// ErrorMessageIsNil applies the IsNil predicate on the "error_message" field. +func ErrorMessageIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldErrorMessage)) +} + +// ErrorMessageNotNil applies the NotNil predicate on the "error_message" field. +func ErrorMessageNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldErrorMessage)) +} + +// ErrorMessageEqualFold applies the EqualFold predicate on the "error_message" field. +func ErrorMessageEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldErrorMessage, v)) +} + +// ErrorMessageContainsFold applies the ContainsFold predicate on the "error_message" field. +func ErrorMessageContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldErrorMessage, v)) +} + +// ArtifactLocalPathEQ applies the EQ predicate on the "artifact_local_path" field. +func ArtifactLocalPathEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldArtifactLocalPath, v)) +} + +// ArtifactLocalPathNEQ applies the NEQ predicate on the "artifact_local_path" field. +func ArtifactLocalPathNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldArtifactLocalPath, v)) +} + +// ArtifactLocalPathIn applies the In predicate on the "artifact_local_path" field. +func ArtifactLocalPathIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldArtifactLocalPath, vs...)) +} + +// ArtifactLocalPathNotIn applies the NotIn predicate on the "artifact_local_path" field. 
+func ArtifactLocalPathNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldArtifactLocalPath, vs...)) +} + +// ArtifactLocalPathGT applies the GT predicate on the "artifact_local_path" field. +func ArtifactLocalPathGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldArtifactLocalPath, v)) +} + +// ArtifactLocalPathGTE applies the GTE predicate on the "artifact_local_path" field. +func ArtifactLocalPathGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldArtifactLocalPath, v)) +} + +// ArtifactLocalPathLT applies the LT predicate on the "artifact_local_path" field. +func ArtifactLocalPathLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldArtifactLocalPath, v)) +} + +// ArtifactLocalPathLTE applies the LTE predicate on the "artifact_local_path" field. +func ArtifactLocalPathLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldArtifactLocalPath, v)) +} + +// ArtifactLocalPathContains applies the Contains predicate on the "artifact_local_path" field. +func ArtifactLocalPathContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldArtifactLocalPath, v)) +} + +// ArtifactLocalPathHasPrefix applies the HasPrefix predicate on the "artifact_local_path" field. +func ArtifactLocalPathHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldArtifactLocalPath, v)) +} + +// ArtifactLocalPathHasSuffix applies the HasSuffix predicate on the "artifact_local_path" field. +func ArtifactLocalPathHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldArtifactLocalPath, v)) +} + +// ArtifactLocalPathIsNil applies the IsNil predicate on the "artifact_local_path" field. 
+func ArtifactLocalPathIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldArtifactLocalPath)) +} + +// ArtifactLocalPathNotNil applies the NotNil predicate on the "artifact_local_path" field. +func ArtifactLocalPathNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldArtifactLocalPath)) +} + +// ArtifactLocalPathEqualFold applies the EqualFold predicate on the "artifact_local_path" field. +func ArtifactLocalPathEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldArtifactLocalPath, v)) +} + +// ArtifactLocalPathContainsFold applies the ContainsFold predicate on the "artifact_local_path" field. +func ArtifactLocalPathContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldArtifactLocalPath, v)) +} + +// ArtifactSizeBytesEQ applies the EQ predicate on the "artifact_size_bytes" field. +func ArtifactSizeBytesEQ(v int64) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldArtifactSizeBytes, v)) +} + +// ArtifactSizeBytesNEQ applies the NEQ predicate on the "artifact_size_bytes" field. +func ArtifactSizeBytesNEQ(v int64) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldArtifactSizeBytes, v)) +} + +// ArtifactSizeBytesIn applies the In predicate on the "artifact_size_bytes" field. +func ArtifactSizeBytesIn(vs ...int64) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldArtifactSizeBytes, vs...)) +} + +// ArtifactSizeBytesNotIn applies the NotIn predicate on the "artifact_size_bytes" field. +func ArtifactSizeBytesNotIn(vs ...int64) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldArtifactSizeBytes, vs...)) +} + +// ArtifactSizeBytesGT applies the GT predicate on the "artifact_size_bytes" field. 
+func ArtifactSizeBytesGT(v int64) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldArtifactSizeBytes, v)) +} + +// ArtifactSizeBytesGTE applies the GTE predicate on the "artifact_size_bytes" field. +func ArtifactSizeBytesGTE(v int64) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldArtifactSizeBytes, v)) +} + +// ArtifactSizeBytesLT applies the LT predicate on the "artifact_size_bytes" field. +func ArtifactSizeBytesLT(v int64) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldArtifactSizeBytes, v)) +} + +// ArtifactSizeBytesLTE applies the LTE predicate on the "artifact_size_bytes" field. +func ArtifactSizeBytesLTE(v int64) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldArtifactSizeBytes, v)) +} + +// ArtifactSizeBytesIsNil applies the IsNil predicate on the "artifact_size_bytes" field. +func ArtifactSizeBytesIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldArtifactSizeBytes)) +} + +// ArtifactSizeBytesNotNil applies the NotNil predicate on the "artifact_size_bytes" field. +func ArtifactSizeBytesNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldArtifactSizeBytes)) +} + +// ArtifactSha256EQ applies the EQ predicate on the "artifact_sha256" field. +func ArtifactSha256EQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldArtifactSha256, v)) +} + +// ArtifactSha256NEQ applies the NEQ predicate on the "artifact_sha256" field. +func ArtifactSha256NEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldArtifactSha256, v)) +} + +// ArtifactSha256In applies the In predicate on the "artifact_sha256" field. +func ArtifactSha256In(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldArtifactSha256, vs...)) +} + +// ArtifactSha256NotIn applies the NotIn predicate on the "artifact_sha256" field. 
+func ArtifactSha256NotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldArtifactSha256, vs...)) +} + +// ArtifactSha256GT applies the GT predicate on the "artifact_sha256" field. +func ArtifactSha256GT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldArtifactSha256, v)) +} + +// ArtifactSha256GTE applies the GTE predicate on the "artifact_sha256" field. +func ArtifactSha256GTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldArtifactSha256, v)) +} + +// ArtifactSha256LT applies the LT predicate on the "artifact_sha256" field. +func ArtifactSha256LT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldArtifactSha256, v)) +} + +// ArtifactSha256LTE applies the LTE predicate on the "artifact_sha256" field. +func ArtifactSha256LTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldArtifactSha256, v)) +} + +// ArtifactSha256Contains applies the Contains predicate on the "artifact_sha256" field. +func ArtifactSha256Contains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldArtifactSha256, v)) +} + +// ArtifactSha256HasPrefix applies the HasPrefix predicate on the "artifact_sha256" field. +func ArtifactSha256HasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldArtifactSha256, v)) +} + +// ArtifactSha256HasSuffix applies the HasSuffix predicate on the "artifact_sha256" field. +func ArtifactSha256HasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldArtifactSha256, v)) +} + +// ArtifactSha256IsNil applies the IsNil predicate on the "artifact_sha256" field. +func ArtifactSha256IsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldArtifactSha256)) +} + +// ArtifactSha256NotNil applies the NotNil predicate on the "artifact_sha256" field. 
+func ArtifactSha256NotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldArtifactSha256)) +} + +// ArtifactSha256EqualFold applies the EqualFold predicate on the "artifact_sha256" field. +func ArtifactSha256EqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldArtifactSha256, v)) +} + +// ArtifactSha256ContainsFold applies the ContainsFold predicate on the "artifact_sha256" field. +func ArtifactSha256ContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldArtifactSha256, v)) +} + +// S3BucketEQ applies the EQ predicate on the "s3_bucket" field. +func S3BucketEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldS3Bucket, v)) +} + +// S3BucketNEQ applies the NEQ predicate on the "s3_bucket" field. +func S3BucketNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldS3Bucket, v)) +} + +// S3BucketIn applies the In predicate on the "s3_bucket" field. +func S3BucketIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldS3Bucket, vs...)) +} + +// S3BucketNotIn applies the NotIn predicate on the "s3_bucket" field. +func S3BucketNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldS3Bucket, vs...)) +} + +// S3BucketGT applies the GT predicate on the "s3_bucket" field. +func S3BucketGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldS3Bucket, v)) +} + +// S3BucketGTE applies the GTE predicate on the "s3_bucket" field. +func S3BucketGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldS3Bucket, v)) +} + +// S3BucketLT applies the LT predicate on the "s3_bucket" field. +func S3BucketLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldS3Bucket, v)) +} + +// S3BucketLTE applies the LTE predicate on the "s3_bucket" field. 
+func S3BucketLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldS3Bucket, v)) +} + +// S3BucketContains applies the Contains predicate on the "s3_bucket" field. +func S3BucketContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldS3Bucket, v)) +} + +// S3BucketHasPrefix applies the HasPrefix predicate on the "s3_bucket" field. +func S3BucketHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldS3Bucket, v)) +} + +// S3BucketHasSuffix applies the HasSuffix predicate on the "s3_bucket" field. +func S3BucketHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldS3Bucket, v)) +} + +// S3BucketIsNil applies the IsNil predicate on the "s3_bucket" field. +func S3BucketIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldS3Bucket)) +} + +// S3BucketNotNil applies the NotNil predicate on the "s3_bucket" field. +func S3BucketNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldS3Bucket)) +} + +// S3BucketEqualFold applies the EqualFold predicate on the "s3_bucket" field. +func S3BucketEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldS3Bucket, v)) +} + +// S3BucketContainsFold applies the ContainsFold predicate on the "s3_bucket" field. +func S3BucketContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldS3Bucket, v)) +} + +// S3KeyEQ applies the EQ predicate on the "s3_key" field. +func S3KeyEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldS3Key, v)) +} + +// S3KeyNEQ applies the NEQ predicate on the "s3_key" field. +func S3KeyNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldS3Key, v)) +} + +// S3KeyIn applies the In predicate on the "s3_key" field. 
+func S3KeyIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldS3Key, vs...)) +} + +// S3KeyNotIn applies the NotIn predicate on the "s3_key" field. +func S3KeyNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldS3Key, vs...)) +} + +// S3KeyGT applies the GT predicate on the "s3_key" field. +func S3KeyGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldS3Key, v)) +} + +// S3KeyGTE applies the GTE predicate on the "s3_key" field. +func S3KeyGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldS3Key, v)) +} + +// S3KeyLT applies the LT predicate on the "s3_key" field. +func S3KeyLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldS3Key, v)) +} + +// S3KeyLTE applies the LTE predicate on the "s3_key" field. +func S3KeyLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldS3Key, v)) +} + +// S3KeyContains applies the Contains predicate on the "s3_key" field. +func S3KeyContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldS3Key, v)) +} + +// S3KeyHasPrefix applies the HasPrefix predicate on the "s3_key" field. +func S3KeyHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldS3Key, v)) +} + +// S3KeyHasSuffix applies the HasSuffix predicate on the "s3_key" field. +func S3KeyHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldS3Key, v)) +} + +// S3KeyIsNil applies the IsNil predicate on the "s3_key" field. +func S3KeyIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldS3Key)) +} + +// S3KeyNotNil applies the NotNil predicate on the "s3_key" field. +func S3KeyNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldS3Key)) +} + +// S3KeyEqualFold applies the EqualFold predicate on the "s3_key" field. 
+func S3KeyEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldS3Key, v)) +} + +// S3KeyContainsFold applies the ContainsFold predicate on the "s3_key" field. +func S3KeyContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldS3Key, v)) +} + +// S3EtagEQ applies the EQ predicate on the "s3_etag" field. +func S3EtagEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldS3Etag, v)) +} + +// S3EtagNEQ applies the NEQ predicate on the "s3_etag" field. +func S3EtagNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldS3Etag, v)) +} + +// S3EtagIn applies the In predicate on the "s3_etag" field. +func S3EtagIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldS3Etag, vs...)) +} + +// S3EtagNotIn applies the NotIn predicate on the "s3_etag" field. +func S3EtagNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldS3Etag, vs...)) +} + +// S3EtagGT applies the GT predicate on the "s3_etag" field. +func S3EtagGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldS3Etag, v)) +} + +// S3EtagGTE applies the GTE predicate on the "s3_etag" field. +func S3EtagGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldS3Etag, v)) +} + +// S3EtagLT applies the LT predicate on the "s3_etag" field. +func S3EtagLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldS3Etag, v)) +} + +// S3EtagLTE applies the LTE predicate on the "s3_etag" field. +func S3EtagLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldS3Etag, v)) +} + +// S3EtagContains applies the Contains predicate on the "s3_etag" field. 
+func S3EtagContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldS3Etag, v)) +} + +// S3EtagHasPrefix applies the HasPrefix predicate on the "s3_etag" field. +func S3EtagHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldS3Etag, v)) +} + +// S3EtagHasSuffix applies the HasSuffix predicate on the "s3_etag" field. +func S3EtagHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldS3Etag, v)) +} + +// S3EtagIsNil applies the IsNil predicate on the "s3_etag" field. +func S3EtagIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldS3Etag)) +} + +// S3EtagNotNil applies the NotNil predicate on the "s3_etag" field. +func S3EtagNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldS3Etag)) +} + +// S3EtagEqualFold applies the EqualFold predicate on the "s3_etag" field. +func S3EtagEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldS3Etag, v)) +} + +// S3EtagContainsFold applies the ContainsFold predicate on the "s3_etag" field. +func S3EtagContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldS3Etag, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
+func CreatedAtNotIn(vs ...time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. 
+func UpdatedAtGTE(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// HasEvents applies the HasEdge predicate on the "events" edge. +func HasEvents() predicate.BackupJob { + return predicate.BackupJob(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, EventsTable, EventsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasEventsWith applies the HasEdge predicate on the "events" edge with a given conditions (other predicates). +func HasEventsWith(preds ...predicate.BackupJobEvent) predicate.BackupJob { + return predicate.BackupJob(func(s *sql.Selector) { + step := newEventsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.BackupJob) predicate.BackupJob { + return predicate.BackupJob(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.BackupJob) predicate.BackupJob { + return predicate.BackupJob(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.BackupJob) predicate.BackupJob { + return predicate.BackupJob(sql.NotPredicates(p)) +} diff --git a/backup/ent/backupjob_create.go b/backup/ent/backupjob_create.go new file mode 100644 index 000000000..460996fe4 --- /dev/null +++ b/backup/ent/backupjob_create.go @@ -0,0 +1,550 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" +) + +// BackupJobCreate is the builder for creating a BackupJob entity. +type BackupJobCreate struct { + config + mutation *BackupJobMutation + hooks []Hook +} + +// SetJobID sets the "job_id" field. +func (_c *BackupJobCreate) SetJobID(v string) *BackupJobCreate { + _c.mutation.SetJobID(v) + return _c +} + +// SetBackupType sets the "backup_type" field. +func (_c *BackupJobCreate) SetBackupType(v backupjob.BackupType) *BackupJobCreate { + _c.mutation.SetBackupType(v) + return _c +} + +// SetStatus sets the "status" field. +func (_c *BackupJobCreate) SetStatus(v backupjob.Status) *BackupJobCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableStatus(v *backupjob.Status) *BackupJobCreate { + if v != nil { + _c.SetStatus(*v) + } + return _c +} + +// SetTriggeredBy sets the "triggered_by" field. +func (_c *BackupJobCreate) SetTriggeredBy(v string) *BackupJobCreate { + _c.mutation.SetTriggeredBy(v) + return _c +} + +// SetNillableTriggeredBy sets the "triggered_by" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableTriggeredBy(v *string) *BackupJobCreate { + if v != nil { + _c.SetTriggeredBy(*v) + } + return _c +} + +// SetIdempotencyKey sets the "idempotency_key" field. 
+func (_c *BackupJobCreate) SetIdempotencyKey(v string) *BackupJobCreate { + _c.mutation.SetIdempotencyKey(v) + return _c +} + +// SetNillableIdempotencyKey sets the "idempotency_key" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableIdempotencyKey(v *string) *BackupJobCreate { + if v != nil { + _c.SetIdempotencyKey(*v) + } + return _c +} + +// SetUploadToS3 sets the "upload_to_s3" field. +func (_c *BackupJobCreate) SetUploadToS3(v bool) *BackupJobCreate { + _c.mutation.SetUploadToS3(v) + return _c +} + +// SetNillableUploadToS3 sets the "upload_to_s3" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableUploadToS3(v *bool) *BackupJobCreate { + if v != nil { + _c.SetUploadToS3(*v) + } + return _c +} + +// SetStartedAt sets the "started_at" field. +func (_c *BackupJobCreate) SetStartedAt(v time.Time) *BackupJobCreate { + _c.mutation.SetStartedAt(v) + return _c +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableStartedAt(v *time.Time) *BackupJobCreate { + if v != nil { + _c.SetStartedAt(*v) + } + return _c +} + +// SetFinishedAt sets the "finished_at" field. +func (_c *BackupJobCreate) SetFinishedAt(v time.Time) *BackupJobCreate { + _c.mutation.SetFinishedAt(v) + return _c +} + +// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableFinishedAt(v *time.Time) *BackupJobCreate { + if v != nil { + _c.SetFinishedAt(*v) + } + return _c +} + +// SetErrorMessage sets the "error_message" field. +func (_c *BackupJobCreate) SetErrorMessage(v string) *BackupJobCreate { + _c.mutation.SetErrorMessage(v) + return _c +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. 
+func (_c *BackupJobCreate) SetNillableErrorMessage(v *string) *BackupJobCreate { + if v != nil { + _c.SetErrorMessage(*v) + } + return _c +} + +// SetArtifactLocalPath sets the "artifact_local_path" field. +func (_c *BackupJobCreate) SetArtifactLocalPath(v string) *BackupJobCreate { + _c.mutation.SetArtifactLocalPath(v) + return _c +} + +// SetNillableArtifactLocalPath sets the "artifact_local_path" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableArtifactLocalPath(v *string) *BackupJobCreate { + if v != nil { + _c.SetArtifactLocalPath(*v) + } + return _c +} + +// SetArtifactSizeBytes sets the "artifact_size_bytes" field. +func (_c *BackupJobCreate) SetArtifactSizeBytes(v int64) *BackupJobCreate { + _c.mutation.SetArtifactSizeBytes(v) + return _c +} + +// SetNillableArtifactSizeBytes sets the "artifact_size_bytes" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableArtifactSizeBytes(v *int64) *BackupJobCreate { + if v != nil { + _c.SetArtifactSizeBytes(*v) + } + return _c +} + +// SetArtifactSha256 sets the "artifact_sha256" field. +func (_c *BackupJobCreate) SetArtifactSha256(v string) *BackupJobCreate { + _c.mutation.SetArtifactSha256(v) + return _c +} + +// SetNillableArtifactSha256 sets the "artifact_sha256" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableArtifactSha256(v *string) *BackupJobCreate { + if v != nil { + _c.SetArtifactSha256(*v) + } + return _c +} + +// SetS3Bucket sets the "s3_bucket" field. +func (_c *BackupJobCreate) SetS3Bucket(v string) *BackupJobCreate { + _c.mutation.SetS3Bucket(v) + return _c +} + +// SetNillableS3Bucket sets the "s3_bucket" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableS3Bucket(v *string) *BackupJobCreate { + if v != nil { + _c.SetS3Bucket(*v) + } + return _c +} + +// SetS3Key sets the "s3_key" field. 
+func (_c *BackupJobCreate) SetS3Key(v string) *BackupJobCreate { + _c.mutation.SetS3Key(v) + return _c +} + +// SetNillableS3Key sets the "s3_key" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableS3Key(v *string) *BackupJobCreate { + if v != nil { + _c.SetS3Key(*v) + } + return _c +} + +// SetS3Etag sets the "s3_etag" field. +func (_c *BackupJobCreate) SetS3Etag(v string) *BackupJobCreate { + _c.mutation.SetS3Etag(v) + return _c +} + +// SetNillableS3Etag sets the "s3_etag" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableS3Etag(v *string) *BackupJobCreate { + if v != nil { + _c.SetS3Etag(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *BackupJobCreate) SetCreatedAt(v time.Time) *BackupJobCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableCreatedAt(v *time.Time) *BackupJobCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *BackupJobCreate) SetUpdatedAt(v time.Time) *BackupJobCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableUpdatedAt(v *time.Time) *BackupJobCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// AddEventIDs adds the "events" edge to the BackupJobEvent entity by IDs. +func (_c *BackupJobCreate) AddEventIDs(ids ...int) *BackupJobCreate { + _c.mutation.AddEventIDs(ids...) + return _c +} + +// AddEvents adds the "events" edges to the BackupJobEvent entity. +func (_c *BackupJobCreate) AddEvents(v ...*BackupJobEvent) *BackupJobCreate { + ids := make([]int, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _c.AddEventIDs(ids...) +} + +// Mutation returns the BackupJobMutation object of the builder. 
+func (_c *BackupJobCreate) Mutation() *BackupJobMutation { + return _c.mutation +} + +// Save creates the BackupJob in the database. +func (_c *BackupJobCreate) Save(ctx context.Context) (*BackupJob, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *BackupJobCreate) SaveX(ctx context.Context) *BackupJob { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *BackupJobCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *BackupJobCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *BackupJobCreate) defaults() { + if _, ok := _c.mutation.Status(); !ok { + v := backupjob.DefaultStatus + _c.mutation.SetStatus(v) + } + if _, ok := _c.mutation.TriggeredBy(); !ok { + v := backupjob.DefaultTriggeredBy + _c.mutation.SetTriggeredBy(v) + } + if _, ok := _c.mutation.UploadToS3(); !ok { + v := backupjob.DefaultUploadToS3 + _c.mutation.SetUploadToS3(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := backupjob.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := backupjob.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *BackupJobCreate) check() error { + if _, ok := _c.mutation.JobID(); !ok { + return &ValidationError{Name: "job_id", err: errors.New(`ent: missing required field "BackupJob.job_id"`)} + } + if _, ok := _c.mutation.BackupType(); !ok { + return &ValidationError{Name: "backup_type", err: errors.New(`ent: missing required field "BackupJob.backup_type"`)} + } + if v, ok := _c.mutation.BackupType(); ok { + if err := backupjob.BackupTypeValidator(v); err != nil { + return &ValidationError{Name: "backup_type", err: fmt.Errorf(`ent: validator failed for field "BackupJob.backup_type": %w`, err)} + } + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "BackupJob.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if err := backupjob.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "BackupJob.status": %w`, err)} + } + } + if _, ok := _c.mutation.TriggeredBy(); !ok { + return &ValidationError{Name: "triggered_by", err: errors.New(`ent: missing required field "BackupJob.triggered_by"`)} + } + if _, ok := _c.mutation.UploadToS3(); !ok { + return &ValidationError{Name: "upload_to_s3", err: errors.New(`ent: missing required field "BackupJob.upload_to_s3"`)} + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "BackupJob.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "BackupJob.updated_at"`)} + } + return nil +} + +func (_c *BackupJobCreate) sqlSave(ctx context.Context) (*BackupJob, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: 
err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *BackupJobCreate) createSpec() (*BackupJob, *sqlgraph.CreateSpec) { + var ( + _node = &BackupJob{config: _c.config} + _spec = sqlgraph.NewCreateSpec(backupjob.Table, sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt)) + ) + if value, ok := _c.mutation.JobID(); ok { + _spec.SetField(backupjob.FieldJobID, field.TypeString, value) + _node.JobID = value + } + if value, ok := _c.mutation.BackupType(); ok { + _spec.SetField(backupjob.FieldBackupType, field.TypeEnum, value) + _node.BackupType = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(backupjob.FieldStatus, field.TypeEnum, value) + _node.Status = value + } + if value, ok := _c.mutation.TriggeredBy(); ok { + _spec.SetField(backupjob.FieldTriggeredBy, field.TypeString, value) + _node.TriggeredBy = value + } + if value, ok := _c.mutation.IdempotencyKey(); ok { + _spec.SetField(backupjob.FieldIdempotencyKey, field.TypeString, value) + _node.IdempotencyKey = value + } + if value, ok := _c.mutation.UploadToS3(); ok { + _spec.SetField(backupjob.FieldUploadToS3, field.TypeBool, value) + _node.UploadToS3 = value + } + if value, ok := _c.mutation.StartedAt(); ok { + _spec.SetField(backupjob.FieldStartedAt, field.TypeTime, value) + _node.StartedAt = &value + } + if value, ok := _c.mutation.FinishedAt(); ok { + _spec.SetField(backupjob.FieldFinishedAt, field.TypeTime, value) + _node.FinishedAt = &value + } + if value, ok := _c.mutation.ErrorMessage(); ok { + _spec.SetField(backupjob.FieldErrorMessage, field.TypeString, value) + _node.ErrorMessage = value + } + if value, ok := _c.mutation.ArtifactLocalPath(); ok { + _spec.SetField(backupjob.FieldArtifactLocalPath, field.TypeString, value) + _node.ArtifactLocalPath = value + } + if value, ok := _c.mutation.ArtifactSizeBytes(); ok { + 
_spec.SetField(backupjob.FieldArtifactSizeBytes, field.TypeInt64, value) + _node.ArtifactSizeBytes = &value + } + if value, ok := _c.mutation.ArtifactSha256(); ok { + _spec.SetField(backupjob.FieldArtifactSha256, field.TypeString, value) + _node.ArtifactSha256 = value + } + if value, ok := _c.mutation.S3Bucket(); ok { + _spec.SetField(backupjob.FieldS3Bucket, field.TypeString, value) + _node.S3Bucket = value + } + if value, ok := _c.mutation.S3Key(); ok { + _spec.SetField(backupjob.FieldS3Key, field.TypeString, value) + _node.S3Key = value + } + if value, ok := _c.mutation.S3Etag(); ok { + _spec.SetField(backupjob.FieldS3Etag, field.TypeString, value) + _node.S3Etag = value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(backupjob.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(backupjob.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if nodes := _c.mutation.EventsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: backupjob.EventsTable, + Columns: []string{backupjob.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// BackupJobCreateBulk is the builder for creating many BackupJob entities in bulk. +type BackupJobCreateBulk struct { + config + err error + builders []*BackupJobCreate +} + +// Save creates the BackupJob entities in the database. 
+func (_c *BackupJobCreateBulk) Save(ctx context.Context) ([]*BackupJob, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*BackupJob, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BackupJobMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *BackupJobCreateBulk) SaveX(ctx context.Context) []*BackupJob { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *BackupJobCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *BackupJobCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backup/ent/backupjob_delete.go b/backup/ent/backupjob_delete.go new file mode 100644 index 000000000..8aba6a0c9 --- /dev/null +++ b/backup/ent/backupjob_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupJobDelete is the builder for deleting a BackupJob entity. +type BackupJobDelete struct { + config + hooks []Hook + mutation *BackupJobMutation +} + +// Where appends a list predicates to the BackupJobDelete builder. +func (_d *BackupJobDelete) Where(ps ...predicate.BackupJob) *BackupJobDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *BackupJobDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *BackupJobDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *BackupJobDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(backupjob.Table, sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// BackupJobDeleteOne is the builder for deleting a single BackupJob entity. +type BackupJobDeleteOne struct { + _d *BackupJobDelete +} + +// Where appends a list predicates to the BackupJobDelete builder. +func (_d *BackupJobDeleteOne) Where(ps ...predicate.BackupJob) *BackupJobDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *BackupJobDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{backupjob.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *BackupJobDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backup/ent/backupjob_query.go b/backup/ent/backupjob_query.go new file mode 100644 index 000000000..4ec719a6d --- /dev/null +++ b/backup/ent/backupjob_query.go @@ -0,0 +1,606 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupJobQuery is the builder for querying BackupJob entities. +type BackupJobQuery struct { + config + ctx *QueryContext + order []backupjob.OrderOption + inters []Interceptor + predicates []predicate.BackupJob + withEvents *BackupJobEventQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the BackupJobQuery builder. +func (_q *BackupJobQuery) Where(ps ...predicate.BackupJob) *BackupJobQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *BackupJobQuery) Limit(limit int) *BackupJobQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *BackupJobQuery) Offset(offset int) *BackupJobQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *BackupJobQuery) Unique(unique bool) *BackupJobQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *BackupJobQuery) Order(o ...backupjob.OrderOption) *BackupJobQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryEvents chains the current query on the "events" edge. 
+func (_q *BackupJobQuery) QueryEvents() *BackupJobEventQuery { + query := (&BackupJobEventClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(backupjob.Table, backupjob.FieldID, selector), + sqlgraph.To(backupjobevent.Table, backupjobevent.FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, backupjob.EventsTable, backupjob.EventsColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first BackupJob entity from the query. +// Returns a *NotFoundError when no BackupJob was found. +func (_q *BackupJobQuery) First(ctx context.Context) (*BackupJob, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{backupjob.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *BackupJobQuery) FirstX(ctx context.Context) *BackupJob { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first BackupJob ID from the query. +// Returns a *NotFoundError when no BackupJob ID was found. +func (_q *BackupJobQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{backupjob.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (_q *BackupJobQuery) FirstIDX(ctx context.Context) int { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single BackupJob entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one BackupJob entity is found. +// Returns a *NotFoundError when no BackupJob entities are found. +func (_q *BackupJobQuery) Only(ctx context.Context) (*BackupJob, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{backupjob.Label} + default: + return nil, &NotSingularError{backupjob.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *BackupJobQuery) OnlyX(ctx context.Context) *BackupJob { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only BackupJob ID in the query. +// Returns a *NotSingularError when more than one BackupJob ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *BackupJobQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{backupjob.Label} + default: + err = &NotSingularError{backupjob.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *BackupJobQuery) OnlyIDX(ctx context.Context) int { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of BackupJobs. 
+func (_q *BackupJobQuery) All(ctx context.Context) ([]*BackupJob, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*BackupJob, *BackupJobQuery]() + return withInterceptors[[]*BackupJob](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *BackupJobQuery) AllX(ctx context.Context) []*BackupJob { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of BackupJob IDs. +func (_q *BackupJobQuery) IDs(ctx context.Context) (ids []int, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(backupjob.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *BackupJobQuery) IDsX(ctx context.Context) []int { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *BackupJobQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*BackupJobQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *BackupJobQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (_q *BackupJobQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *BackupJobQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the BackupJobQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *BackupJobQuery) Clone() *BackupJobQuery { + if _q == nil { + return nil + } + return &BackupJobQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]backupjob.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.BackupJob{}, _q.predicates...), + withEvents: _q.withEvents.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithEvents tells the query-builder to eager-load the nodes that are connected to +// the "events" edge. The optional arguments are used to configure the query builder of the edge. +func (_q *BackupJobQuery) WithEvents(opts ...func(*BackupJobEventQuery)) *BackupJobQuery { + query := (&BackupJobEventClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withEvents = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// JobID string `json:"job_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.BackupJob.Query(). +// GroupBy(backupjob.FieldJobID). +// Aggregate(ent.Count()). 
+// Scan(ctx, &v) +func (_q *BackupJobQuery) GroupBy(field string, fields ...string) *BackupJobGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &BackupJobGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = backupjob.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// JobID string `json:"job_id,omitempty"` +// } +// +// client.BackupJob.Query(). +// Select(backupjob.FieldJobID). +// Scan(ctx, &v) +func (_q *BackupJobQuery) Select(fields ...string) *BackupJobSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &BackupJobSelect{BackupJobQuery: _q} + sbuild.label = backupjob.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a BackupJobSelect configured with the given aggregations. +func (_q *BackupJobQuery) Aggregate(fns ...AggregateFunc) *BackupJobSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *BackupJobQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !backupjob.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *BackupJobQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BackupJob, error) { + var ( + nodes = []*BackupJob{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withEvents != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*BackupJob).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &BackupJob{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withEvents; query != nil { + if err := _q.loadEvents(ctx, query, nodes, + func(n *BackupJob) { n.Edges.Events = []*BackupJobEvent{} }, + func(n *BackupJob, e *BackupJobEvent) { n.Edges.Events = append(n.Edges.Events, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *BackupJobQuery) loadEvents(ctx context.Context, query *BackupJobEventQuery, nodes []*BackupJob, init func(*BackupJob), assign func(*BackupJob, *BackupJobEvent)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[int]*BackupJob) + for i := range nodes { + fks = append(fks, nodes[i].ID) + 
nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + if len(query.ctx.Fields) > 0 { + query.ctx.AppendFieldOnce(backupjobevent.FieldBackupJobID) + } + query.Where(predicate.BackupJobEvent(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(backupjob.EventsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.BackupJobID + node, ok := nodeids[fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "backup_job_id" returned %v for node %v`, fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (_q *BackupJobQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *BackupJobQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(backupjob.Table, backupjob.Columns, sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, backupjob.FieldID) + for i := range fields { + if fields[i] != backupjob.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q 
*BackupJobQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(backupjob.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = backupjob.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// BackupJobGroupBy is the group-by builder for BackupJob entities. +type BackupJobGroupBy struct { + selector + build *BackupJobQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *BackupJobGroupBy) Aggregate(fns ...AggregateFunc) *BackupJobGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *BackupJobGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*BackupJobQuery, *BackupJobGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *BackupJobGroupBy) sqlScan(ctx context.Context, root *BackupJobQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// BackupJobSelect is the builder for selecting fields of BackupJob entities. +type BackupJobSelect struct { + *BackupJobQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *BackupJobSelect) Aggregate(fns ...AggregateFunc) *BackupJobSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *BackupJobSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*BackupJobQuery, *BackupJobSelect](ctx, _s.BackupJobQuery, _s, _s.inters, v) +} + +func (_s *BackupJobSelect) sqlScan(ctx context.Context, root *BackupJobQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backup/ent/backupjob_update.go b/backup/ent/backupjob_update.go new file mode 100644 index 000000000..5eeb5d765 --- /dev/null +++ b/backup/ent/backupjob_update.go @@ -0,0 +1,1121 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupJobUpdate is the builder for updating BackupJob entities. +type BackupJobUpdate struct { + config + hooks []Hook + mutation *BackupJobMutation +} + +// Where appends a list predicates to the BackupJobUpdate builder. +func (_u *BackupJobUpdate) Where(ps ...predicate.BackupJob) *BackupJobUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetJobID sets the "job_id" field. 
+func (_u *BackupJobUpdate) SetJobID(v string) *BackupJobUpdate { + _u.mutation.SetJobID(v) + return _u +} + +// SetNillableJobID sets the "job_id" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableJobID(v *string) *BackupJobUpdate { + if v != nil { + _u.SetJobID(*v) + } + return _u +} + +// SetBackupType sets the "backup_type" field. +func (_u *BackupJobUpdate) SetBackupType(v backupjob.BackupType) *BackupJobUpdate { + _u.mutation.SetBackupType(v) + return _u +} + +// SetNillableBackupType sets the "backup_type" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableBackupType(v *backupjob.BackupType) *BackupJobUpdate { + if v != nil { + _u.SetBackupType(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *BackupJobUpdate) SetStatus(v backupjob.Status) *BackupJobUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableStatus(v *backupjob.Status) *BackupJobUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetTriggeredBy sets the "triggered_by" field. +func (_u *BackupJobUpdate) SetTriggeredBy(v string) *BackupJobUpdate { + _u.mutation.SetTriggeredBy(v) + return _u +} + +// SetNillableTriggeredBy sets the "triggered_by" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableTriggeredBy(v *string) *BackupJobUpdate { + if v != nil { + _u.SetTriggeredBy(*v) + } + return _u +} + +// SetIdempotencyKey sets the "idempotency_key" field. +func (_u *BackupJobUpdate) SetIdempotencyKey(v string) *BackupJobUpdate { + _u.mutation.SetIdempotencyKey(v) + return _u +} + +// SetNillableIdempotencyKey sets the "idempotency_key" field if the given value is not nil. 
+func (_u *BackupJobUpdate) SetNillableIdempotencyKey(v *string) *BackupJobUpdate { + if v != nil { + _u.SetIdempotencyKey(*v) + } + return _u +} + +// ClearIdempotencyKey clears the value of the "idempotency_key" field. +func (_u *BackupJobUpdate) ClearIdempotencyKey() *BackupJobUpdate { + _u.mutation.ClearIdempotencyKey() + return _u +} + +// SetUploadToS3 sets the "upload_to_s3" field. +func (_u *BackupJobUpdate) SetUploadToS3(v bool) *BackupJobUpdate { + _u.mutation.SetUploadToS3(v) + return _u +} + +// SetNillableUploadToS3 sets the "upload_to_s3" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableUploadToS3(v *bool) *BackupJobUpdate { + if v != nil { + _u.SetUploadToS3(*v) + } + return _u +} + +// SetStartedAt sets the "started_at" field. +func (_u *BackupJobUpdate) SetStartedAt(v time.Time) *BackupJobUpdate { + _u.mutation.SetStartedAt(v) + return _u +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableStartedAt(v *time.Time) *BackupJobUpdate { + if v != nil { + _u.SetStartedAt(*v) + } + return _u +} + +// ClearStartedAt clears the value of the "started_at" field. +func (_u *BackupJobUpdate) ClearStartedAt() *BackupJobUpdate { + _u.mutation.ClearStartedAt() + return _u +} + +// SetFinishedAt sets the "finished_at" field. +func (_u *BackupJobUpdate) SetFinishedAt(v time.Time) *BackupJobUpdate { + _u.mutation.SetFinishedAt(v) + return _u +} + +// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableFinishedAt(v *time.Time) *BackupJobUpdate { + if v != nil { + _u.SetFinishedAt(*v) + } + return _u +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (_u *BackupJobUpdate) ClearFinishedAt() *BackupJobUpdate { + _u.mutation.ClearFinishedAt() + return _u +} + +// SetErrorMessage sets the "error_message" field. 
+func (_u *BackupJobUpdate) SetErrorMessage(v string) *BackupJobUpdate { + _u.mutation.SetErrorMessage(v) + return _u +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableErrorMessage(v *string) *BackupJobUpdate { + if v != nil { + _u.SetErrorMessage(*v) + } + return _u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (_u *BackupJobUpdate) ClearErrorMessage() *BackupJobUpdate { + _u.mutation.ClearErrorMessage() + return _u +} + +// SetArtifactLocalPath sets the "artifact_local_path" field. +func (_u *BackupJobUpdate) SetArtifactLocalPath(v string) *BackupJobUpdate { + _u.mutation.SetArtifactLocalPath(v) + return _u +} + +// SetNillableArtifactLocalPath sets the "artifact_local_path" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableArtifactLocalPath(v *string) *BackupJobUpdate { + if v != nil { + _u.SetArtifactLocalPath(*v) + } + return _u +} + +// ClearArtifactLocalPath clears the value of the "artifact_local_path" field. +func (_u *BackupJobUpdate) ClearArtifactLocalPath() *BackupJobUpdate { + _u.mutation.ClearArtifactLocalPath() + return _u +} + +// SetArtifactSizeBytes sets the "artifact_size_bytes" field. +func (_u *BackupJobUpdate) SetArtifactSizeBytes(v int64) *BackupJobUpdate { + _u.mutation.ResetArtifactSizeBytes() + _u.mutation.SetArtifactSizeBytes(v) + return _u +} + +// SetNillableArtifactSizeBytes sets the "artifact_size_bytes" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableArtifactSizeBytes(v *int64) *BackupJobUpdate { + if v != nil { + _u.SetArtifactSizeBytes(*v) + } + return _u +} + +// AddArtifactSizeBytes adds value to the "artifact_size_bytes" field. +func (_u *BackupJobUpdate) AddArtifactSizeBytes(v int64) *BackupJobUpdate { + _u.mutation.AddArtifactSizeBytes(v) + return _u +} + +// ClearArtifactSizeBytes clears the value of the "artifact_size_bytes" field. 
+func (_u *BackupJobUpdate) ClearArtifactSizeBytes() *BackupJobUpdate { + _u.mutation.ClearArtifactSizeBytes() + return _u +} + +// SetArtifactSha256 sets the "artifact_sha256" field. +func (_u *BackupJobUpdate) SetArtifactSha256(v string) *BackupJobUpdate { + _u.mutation.SetArtifactSha256(v) + return _u +} + +// SetNillableArtifactSha256 sets the "artifact_sha256" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableArtifactSha256(v *string) *BackupJobUpdate { + if v != nil { + _u.SetArtifactSha256(*v) + } + return _u +} + +// ClearArtifactSha256 clears the value of the "artifact_sha256" field. +func (_u *BackupJobUpdate) ClearArtifactSha256() *BackupJobUpdate { + _u.mutation.ClearArtifactSha256() + return _u +} + +// SetS3Bucket sets the "s3_bucket" field. +func (_u *BackupJobUpdate) SetS3Bucket(v string) *BackupJobUpdate { + _u.mutation.SetS3Bucket(v) + return _u +} + +// SetNillableS3Bucket sets the "s3_bucket" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableS3Bucket(v *string) *BackupJobUpdate { + if v != nil { + _u.SetS3Bucket(*v) + } + return _u +} + +// ClearS3Bucket clears the value of the "s3_bucket" field. +func (_u *BackupJobUpdate) ClearS3Bucket() *BackupJobUpdate { + _u.mutation.ClearS3Bucket() + return _u +} + +// SetS3Key sets the "s3_key" field. +func (_u *BackupJobUpdate) SetS3Key(v string) *BackupJobUpdate { + _u.mutation.SetS3Key(v) + return _u +} + +// SetNillableS3Key sets the "s3_key" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableS3Key(v *string) *BackupJobUpdate { + if v != nil { + _u.SetS3Key(*v) + } + return _u +} + +// ClearS3Key clears the value of the "s3_key" field. +func (_u *BackupJobUpdate) ClearS3Key() *BackupJobUpdate { + _u.mutation.ClearS3Key() + return _u +} + +// SetS3Etag sets the "s3_etag" field. 
+func (_u *BackupJobUpdate) SetS3Etag(v string) *BackupJobUpdate { + _u.mutation.SetS3Etag(v) + return _u +} + +// SetNillableS3Etag sets the "s3_etag" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableS3Etag(v *string) *BackupJobUpdate { + if v != nil { + _u.SetS3Etag(*v) + } + return _u +} + +// ClearS3Etag clears the value of the "s3_etag" field. +func (_u *BackupJobUpdate) ClearS3Etag() *BackupJobUpdate { + _u.mutation.ClearS3Etag() + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *BackupJobUpdate) SetUpdatedAt(v time.Time) *BackupJobUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// AddEventIDs adds the "events" edge to the BackupJobEvent entity by IDs. +func (_u *BackupJobUpdate) AddEventIDs(ids ...int) *BackupJobUpdate { + _u.mutation.AddEventIDs(ids...) + return _u +} + +// AddEvents adds the "events" edges to the BackupJobEvent entity. +func (_u *BackupJobUpdate) AddEvents(v ...*BackupJobEvent) *BackupJobUpdate { + ids := make([]int, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddEventIDs(ids...) +} + +// Mutation returns the BackupJobMutation object of the builder. +func (_u *BackupJobUpdate) Mutation() *BackupJobMutation { + return _u.mutation +} + +// ClearEvents clears all "events" edges to the BackupJobEvent entity. +func (_u *BackupJobUpdate) ClearEvents() *BackupJobUpdate { + _u.mutation.ClearEvents() + return _u +} + +// RemoveEventIDs removes the "events" edge to BackupJobEvent entities by IDs. +func (_u *BackupJobUpdate) RemoveEventIDs(ids ...int) *BackupJobUpdate { + _u.mutation.RemoveEventIDs(ids...) + return _u +} + +// RemoveEvents removes "events" edges to BackupJobEvent entities. +func (_u *BackupJobUpdate) RemoveEvents(v ...*BackupJobEvent) *BackupJobUpdate { + ids := make([]int, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveEventIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
+func (_u *BackupJobUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *BackupJobUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *BackupJobUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *BackupJobUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *BackupJobUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := backupjob.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *BackupJobUpdate) check() error { + if v, ok := _u.mutation.BackupType(); ok { + if err := backupjob.BackupTypeValidator(v); err != nil { + return &ValidationError{Name: "backup_type", err: fmt.Errorf(`ent: validator failed for field "BackupJob.backup_type": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := backupjob.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "BackupJob.status": %w`, err)} + } + } + return nil +} + +func (_u *BackupJobUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(backupjob.Table, backupjob.Columns, sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.JobID(); ok { + 
_spec.SetField(backupjob.FieldJobID, field.TypeString, value) + } + if value, ok := _u.mutation.BackupType(); ok { + _spec.SetField(backupjob.FieldBackupType, field.TypeEnum, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(backupjob.FieldStatus, field.TypeEnum, value) + } + if value, ok := _u.mutation.TriggeredBy(); ok { + _spec.SetField(backupjob.FieldTriggeredBy, field.TypeString, value) + } + if value, ok := _u.mutation.IdempotencyKey(); ok { + _spec.SetField(backupjob.FieldIdempotencyKey, field.TypeString, value) + } + if _u.mutation.IdempotencyKeyCleared() { + _spec.ClearField(backupjob.FieldIdempotencyKey, field.TypeString) + } + if value, ok := _u.mutation.UploadToS3(); ok { + _spec.SetField(backupjob.FieldUploadToS3, field.TypeBool, value) + } + if value, ok := _u.mutation.StartedAt(); ok { + _spec.SetField(backupjob.FieldStartedAt, field.TypeTime, value) + } + if _u.mutation.StartedAtCleared() { + _spec.ClearField(backupjob.FieldStartedAt, field.TypeTime) + } + if value, ok := _u.mutation.FinishedAt(); ok { + _spec.SetField(backupjob.FieldFinishedAt, field.TypeTime, value) + } + if _u.mutation.FinishedAtCleared() { + _spec.ClearField(backupjob.FieldFinishedAt, field.TypeTime) + } + if value, ok := _u.mutation.ErrorMessage(); ok { + _spec.SetField(backupjob.FieldErrorMessage, field.TypeString, value) + } + if _u.mutation.ErrorMessageCleared() { + _spec.ClearField(backupjob.FieldErrorMessage, field.TypeString) + } + if value, ok := _u.mutation.ArtifactLocalPath(); ok { + _spec.SetField(backupjob.FieldArtifactLocalPath, field.TypeString, value) + } + if _u.mutation.ArtifactLocalPathCleared() { + _spec.ClearField(backupjob.FieldArtifactLocalPath, field.TypeString) + } + if value, ok := _u.mutation.ArtifactSizeBytes(); ok { + _spec.SetField(backupjob.FieldArtifactSizeBytes, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedArtifactSizeBytes(); ok { + _spec.AddField(backupjob.FieldArtifactSizeBytes, field.TypeInt64, value) + 
} + if _u.mutation.ArtifactSizeBytesCleared() { + _spec.ClearField(backupjob.FieldArtifactSizeBytes, field.TypeInt64) + } + if value, ok := _u.mutation.ArtifactSha256(); ok { + _spec.SetField(backupjob.FieldArtifactSha256, field.TypeString, value) + } + if _u.mutation.ArtifactSha256Cleared() { + _spec.ClearField(backupjob.FieldArtifactSha256, field.TypeString) + } + if value, ok := _u.mutation.S3Bucket(); ok { + _spec.SetField(backupjob.FieldS3Bucket, field.TypeString, value) + } + if _u.mutation.S3BucketCleared() { + _spec.ClearField(backupjob.FieldS3Bucket, field.TypeString) + } + if value, ok := _u.mutation.S3Key(); ok { + _spec.SetField(backupjob.FieldS3Key, field.TypeString, value) + } + if _u.mutation.S3KeyCleared() { + _spec.ClearField(backupjob.FieldS3Key, field.TypeString) + } + if value, ok := _u.mutation.S3Etag(); ok { + _spec.SetField(backupjob.FieldS3Etag, field.TypeString, value) + } + if _u.mutation.S3EtagCleared() { + _spec.ClearField(backupjob.FieldS3Etag, field.TypeString) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(backupjob.FieldUpdatedAt, field.TypeTime, value) + } + if _u.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: backupjob.EventsTable, + Columns: []string{backupjob.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedEventsIDs(); len(nodes) > 0 && !_u.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: backupjob.EventsTable, + Columns: []string{backupjob.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) 
+ } + if nodes := _u.mutation.EventsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: backupjob.EventsTable, + Columns: []string{backupjob.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{backupjob.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// BackupJobUpdateOne is the builder for updating a single BackupJob entity. +type BackupJobUpdateOne struct { + config + fields []string + hooks []Hook + mutation *BackupJobMutation +} + +// SetJobID sets the "job_id" field. +func (_u *BackupJobUpdateOne) SetJobID(v string) *BackupJobUpdateOne { + _u.mutation.SetJobID(v) + return _u +} + +// SetNillableJobID sets the "job_id" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableJobID(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetJobID(*v) + } + return _u +} + +// SetBackupType sets the "backup_type" field. +func (_u *BackupJobUpdateOne) SetBackupType(v backupjob.BackupType) *BackupJobUpdateOne { + _u.mutation.SetBackupType(v) + return _u +} + +// SetNillableBackupType sets the "backup_type" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableBackupType(v *backupjob.BackupType) *BackupJobUpdateOne { + if v != nil { + _u.SetBackupType(*v) + } + return _u +} + +// SetStatus sets the "status" field. 
+func (_u *BackupJobUpdateOne) SetStatus(v backupjob.Status) *BackupJobUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableStatus(v *backupjob.Status) *BackupJobUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetTriggeredBy sets the "triggered_by" field. +func (_u *BackupJobUpdateOne) SetTriggeredBy(v string) *BackupJobUpdateOne { + _u.mutation.SetTriggeredBy(v) + return _u +} + +// SetNillableTriggeredBy sets the "triggered_by" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableTriggeredBy(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetTriggeredBy(*v) + } + return _u +} + +// SetIdempotencyKey sets the "idempotency_key" field. +func (_u *BackupJobUpdateOne) SetIdempotencyKey(v string) *BackupJobUpdateOne { + _u.mutation.SetIdempotencyKey(v) + return _u +} + +// SetNillableIdempotencyKey sets the "idempotency_key" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableIdempotencyKey(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetIdempotencyKey(*v) + } + return _u +} + +// ClearIdempotencyKey clears the value of the "idempotency_key" field. +func (_u *BackupJobUpdateOne) ClearIdempotencyKey() *BackupJobUpdateOne { + _u.mutation.ClearIdempotencyKey() + return _u +} + +// SetUploadToS3 sets the "upload_to_s3" field. +func (_u *BackupJobUpdateOne) SetUploadToS3(v bool) *BackupJobUpdateOne { + _u.mutation.SetUploadToS3(v) + return _u +} + +// SetNillableUploadToS3 sets the "upload_to_s3" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableUploadToS3(v *bool) *BackupJobUpdateOne { + if v != nil { + _u.SetUploadToS3(*v) + } + return _u +} + +// SetStartedAt sets the "started_at" field. 
+func (_u *BackupJobUpdateOne) SetStartedAt(v time.Time) *BackupJobUpdateOne { + _u.mutation.SetStartedAt(v) + return _u +} + +// SetNillableStartedAt sets the "started_at" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableStartedAt(v *time.Time) *BackupJobUpdateOne { + if v != nil { + _u.SetStartedAt(*v) + } + return _u +} + +// ClearStartedAt clears the value of the "started_at" field. +func (_u *BackupJobUpdateOne) ClearStartedAt() *BackupJobUpdateOne { + _u.mutation.ClearStartedAt() + return _u +} + +// SetFinishedAt sets the "finished_at" field. +func (_u *BackupJobUpdateOne) SetFinishedAt(v time.Time) *BackupJobUpdateOne { + _u.mutation.SetFinishedAt(v) + return _u +} + +// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableFinishedAt(v *time.Time) *BackupJobUpdateOne { + if v != nil { + _u.SetFinishedAt(*v) + } + return _u +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (_u *BackupJobUpdateOne) ClearFinishedAt() *BackupJobUpdateOne { + _u.mutation.ClearFinishedAt() + return _u +} + +// SetErrorMessage sets the "error_message" field. +func (_u *BackupJobUpdateOne) SetErrorMessage(v string) *BackupJobUpdateOne { + _u.mutation.SetErrorMessage(v) + return _u +} + +// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableErrorMessage(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetErrorMessage(*v) + } + return _u +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (_u *BackupJobUpdateOne) ClearErrorMessage() *BackupJobUpdateOne { + _u.mutation.ClearErrorMessage() + return _u +} + +// SetArtifactLocalPath sets the "artifact_local_path" field. 
+func (_u *BackupJobUpdateOne) SetArtifactLocalPath(v string) *BackupJobUpdateOne { + _u.mutation.SetArtifactLocalPath(v) + return _u +} + +// SetNillableArtifactLocalPath sets the "artifact_local_path" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableArtifactLocalPath(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetArtifactLocalPath(*v) + } + return _u +} + +// ClearArtifactLocalPath clears the value of the "artifact_local_path" field. +func (_u *BackupJobUpdateOne) ClearArtifactLocalPath() *BackupJobUpdateOne { + _u.mutation.ClearArtifactLocalPath() + return _u +} + +// SetArtifactSizeBytes sets the "artifact_size_bytes" field. +func (_u *BackupJobUpdateOne) SetArtifactSizeBytes(v int64) *BackupJobUpdateOne { + _u.mutation.ResetArtifactSizeBytes() + _u.mutation.SetArtifactSizeBytes(v) + return _u +} + +// SetNillableArtifactSizeBytes sets the "artifact_size_bytes" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableArtifactSizeBytes(v *int64) *BackupJobUpdateOne { + if v != nil { + _u.SetArtifactSizeBytes(*v) + } + return _u +} + +// AddArtifactSizeBytes adds value to the "artifact_size_bytes" field. +func (_u *BackupJobUpdateOne) AddArtifactSizeBytes(v int64) *BackupJobUpdateOne { + _u.mutation.AddArtifactSizeBytes(v) + return _u +} + +// ClearArtifactSizeBytes clears the value of the "artifact_size_bytes" field. +func (_u *BackupJobUpdateOne) ClearArtifactSizeBytes() *BackupJobUpdateOne { + _u.mutation.ClearArtifactSizeBytes() + return _u +} + +// SetArtifactSha256 sets the "artifact_sha256" field. +func (_u *BackupJobUpdateOne) SetArtifactSha256(v string) *BackupJobUpdateOne { + _u.mutation.SetArtifactSha256(v) + return _u +} + +// SetNillableArtifactSha256 sets the "artifact_sha256" field if the given value is not nil. 
+func (_u *BackupJobUpdateOne) SetNillableArtifactSha256(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetArtifactSha256(*v) + } + return _u +} + +// ClearArtifactSha256 clears the value of the "artifact_sha256" field. +func (_u *BackupJobUpdateOne) ClearArtifactSha256() *BackupJobUpdateOne { + _u.mutation.ClearArtifactSha256() + return _u +} + +// SetS3Bucket sets the "s3_bucket" field. +func (_u *BackupJobUpdateOne) SetS3Bucket(v string) *BackupJobUpdateOne { + _u.mutation.SetS3Bucket(v) + return _u +} + +// SetNillableS3Bucket sets the "s3_bucket" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableS3Bucket(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetS3Bucket(*v) + } + return _u +} + +// ClearS3Bucket clears the value of the "s3_bucket" field. +func (_u *BackupJobUpdateOne) ClearS3Bucket() *BackupJobUpdateOne { + _u.mutation.ClearS3Bucket() + return _u +} + +// SetS3Key sets the "s3_key" field. +func (_u *BackupJobUpdateOne) SetS3Key(v string) *BackupJobUpdateOne { + _u.mutation.SetS3Key(v) + return _u +} + +// SetNillableS3Key sets the "s3_key" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableS3Key(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetS3Key(*v) + } + return _u +} + +// ClearS3Key clears the value of the "s3_key" field. +func (_u *BackupJobUpdateOne) ClearS3Key() *BackupJobUpdateOne { + _u.mutation.ClearS3Key() + return _u +} + +// SetS3Etag sets the "s3_etag" field. +func (_u *BackupJobUpdateOne) SetS3Etag(v string) *BackupJobUpdateOne { + _u.mutation.SetS3Etag(v) + return _u +} + +// SetNillableS3Etag sets the "s3_etag" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableS3Etag(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetS3Etag(*v) + } + return _u +} + +// ClearS3Etag clears the value of the "s3_etag" field. 
+func (_u *BackupJobUpdateOne) ClearS3Etag() *BackupJobUpdateOne { + _u.mutation.ClearS3Etag() + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *BackupJobUpdateOne) SetUpdatedAt(v time.Time) *BackupJobUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// AddEventIDs adds the "events" edge to the BackupJobEvent entity by IDs. +func (_u *BackupJobUpdateOne) AddEventIDs(ids ...int) *BackupJobUpdateOne { + _u.mutation.AddEventIDs(ids...) + return _u +} + +// AddEvents adds the "events" edges to the BackupJobEvent entity. +func (_u *BackupJobUpdateOne) AddEvents(v ...*BackupJobEvent) *BackupJobUpdateOne { + ids := make([]int, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.AddEventIDs(ids...) +} + +// Mutation returns the BackupJobMutation object of the builder. +func (_u *BackupJobUpdateOne) Mutation() *BackupJobMutation { + return _u.mutation +} + +// ClearEvents clears all "events" edges to the BackupJobEvent entity. +func (_u *BackupJobUpdateOne) ClearEvents() *BackupJobUpdateOne { + _u.mutation.ClearEvents() + return _u +} + +// RemoveEventIDs removes the "events" edge to BackupJobEvent entities by IDs. +func (_u *BackupJobUpdateOne) RemoveEventIDs(ids ...int) *BackupJobUpdateOne { + _u.mutation.RemoveEventIDs(ids...) + return _u +} + +// RemoveEvents removes "events" edges to BackupJobEvent entities. +func (_u *BackupJobUpdateOne) RemoveEvents(v ...*BackupJobEvent) *BackupJobUpdateOne { + ids := make([]int, len(v)) + for i := range v { + ids[i] = v[i].ID + } + return _u.RemoveEventIDs(ids...) +} + +// Where appends a list predicates to the BackupJobUpdate builder. +func (_u *BackupJobUpdateOne) Where(ps ...predicate.BackupJob) *BackupJobUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. 
+func (_u *BackupJobUpdateOne) Select(field string, fields ...string) *BackupJobUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated BackupJob entity. +func (_u *BackupJobUpdateOne) Save(ctx context.Context) (*BackupJob, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *BackupJobUpdateOne) SaveX(ctx context.Context) *BackupJob { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *BackupJobUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *BackupJobUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *BackupJobUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := backupjob.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *BackupJobUpdateOne) check() error { + if v, ok := _u.mutation.BackupType(); ok { + if err := backupjob.BackupTypeValidator(v); err != nil { + return &ValidationError{Name: "backup_type", err: fmt.Errorf(`ent: validator failed for field "BackupJob.backup_type": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := backupjob.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "BackupJob.status": %w`, err)} + } + } + return nil +} + +func (_u *BackupJobUpdateOne) sqlSave(ctx context.Context) (_node *BackupJob, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(backupjob.Table, backupjob.Columns, sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BackupJob.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, backupjob.FieldID) + for _, f := range fields { + if !backupjob.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != backupjob.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.JobID(); ok { + _spec.SetField(backupjob.FieldJobID, field.TypeString, value) + } + if value, ok := _u.mutation.BackupType(); ok { + _spec.SetField(backupjob.FieldBackupType, field.TypeEnum, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(backupjob.FieldStatus, field.TypeEnum, value) + } + if value, ok := _u.mutation.TriggeredBy(); ok { + 
_spec.SetField(backupjob.FieldTriggeredBy, field.TypeString, value) + } + if value, ok := _u.mutation.IdempotencyKey(); ok { + _spec.SetField(backupjob.FieldIdempotencyKey, field.TypeString, value) + } + if _u.mutation.IdempotencyKeyCleared() { + _spec.ClearField(backupjob.FieldIdempotencyKey, field.TypeString) + } + if value, ok := _u.mutation.UploadToS3(); ok { + _spec.SetField(backupjob.FieldUploadToS3, field.TypeBool, value) + } + if value, ok := _u.mutation.StartedAt(); ok { + _spec.SetField(backupjob.FieldStartedAt, field.TypeTime, value) + } + if _u.mutation.StartedAtCleared() { + _spec.ClearField(backupjob.FieldStartedAt, field.TypeTime) + } + if value, ok := _u.mutation.FinishedAt(); ok { + _spec.SetField(backupjob.FieldFinishedAt, field.TypeTime, value) + } + if _u.mutation.FinishedAtCleared() { + _spec.ClearField(backupjob.FieldFinishedAt, field.TypeTime) + } + if value, ok := _u.mutation.ErrorMessage(); ok { + _spec.SetField(backupjob.FieldErrorMessage, field.TypeString, value) + } + if _u.mutation.ErrorMessageCleared() { + _spec.ClearField(backupjob.FieldErrorMessage, field.TypeString) + } + if value, ok := _u.mutation.ArtifactLocalPath(); ok { + _spec.SetField(backupjob.FieldArtifactLocalPath, field.TypeString, value) + } + if _u.mutation.ArtifactLocalPathCleared() { + _spec.ClearField(backupjob.FieldArtifactLocalPath, field.TypeString) + } + if value, ok := _u.mutation.ArtifactSizeBytes(); ok { + _spec.SetField(backupjob.FieldArtifactSizeBytes, field.TypeInt64, value) + } + if value, ok := _u.mutation.AddedArtifactSizeBytes(); ok { + _spec.AddField(backupjob.FieldArtifactSizeBytes, field.TypeInt64, value) + } + if _u.mutation.ArtifactSizeBytesCleared() { + _spec.ClearField(backupjob.FieldArtifactSizeBytes, field.TypeInt64) + } + if value, ok := _u.mutation.ArtifactSha256(); ok { + _spec.SetField(backupjob.FieldArtifactSha256, field.TypeString, value) + } + if _u.mutation.ArtifactSha256Cleared() { + _spec.ClearField(backupjob.FieldArtifactSha256, 
field.TypeString) + } + if value, ok := _u.mutation.S3Bucket(); ok { + _spec.SetField(backupjob.FieldS3Bucket, field.TypeString, value) + } + if _u.mutation.S3BucketCleared() { + _spec.ClearField(backupjob.FieldS3Bucket, field.TypeString) + } + if value, ok := _u.mutation.S3Key(); ok { + _spec.SetField(backupjob.FieldS3Key, field.TypeString, value) + } + if _u.mutation.S3KeyCleared() { + _spec.ClearField(backupjob.FieldS3Key, field.TypeString) + } + if value, ok := _u.mutation.S3Etag(); ok { + _spec.SetField(backupjob.FieldS3Etag, field.TypeString, value) + } + if _u.mutation.S3EtagCleared() { + _spec.ClearField(backupjob.FieldS3Etag, field.TypeString) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(backupjob.FieldUpdatedAt, field.TypeTime, value) + } + if _u.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: backupjob.EventsTable, + Columns: []string{backupjob.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.RemovedEventsIDs(); len(nodes) > 0 && !_u.mutation.EventsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: backupjob.EventsTable, + Columns: []string{backupjob.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.EventsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: backupjob.EventsTable, + Columns: []string{backupjob.EventsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), + }, + } + for _, k 
:= range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &BackupJob{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{backupjob.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backup/ent/backupjobevent.go b/backup/ent/backupjobevent.go new file mode 100644 index 000000000..380a820fc --- /dev/null +++ b/backup/ent/backupjobevent.go @@ -0,0 +1,201 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" +) + +// BackupJobEvent is the model entity for the BackupJobEvent schema. +type BackupJobEvent struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // BackupJobID holds the value of the "backup_job_id" field. + BackupJobID int `json:"backup_job_id,omitempty"` + // Level holds the value of the "level" field. + Level backupjobevent.Level `json:"level,omitempty"` + // EventType holds the value of the "event_type" field. + EventType string `json:"event_type,omitempty"` + // Message holds the value of the "message" field. + Message string `json:"message,omitempty"` + // Payload holds the value of the "payload" field. + Payload string `json:"payload,omitempty"` + // EventTime holds the value of the "event_time" field. + EventTime time.Time `json:"event_time,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. 
+ // The values are being populated by the BackupJobEventQuery when eager-loading is set. + Edges BackupJobEventEdges `json:"edges"` + selectValues sql.SelectValues +} + +// BackupJobEventEdges holds the relations/edges for other nodes in the graph. +type BackupJobEventEdges struct { + // Job holds the value of the job edge. + Job *BackupJob `json:"job,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// JobOrErr returns the Job value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e BackupJobEventEdges) JobOrErr() (*BackupJob, error) { + if e.Job != nil { + return e.Job, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: backupjob.Label} + } + return nil, &NotLoadedError{edge: "job"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*BackupJobEvent) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case backupjobevent.FieldID, backupjobevent.FieldBackupJobID: + values[i] = new(sql.NullInt64) + case backupjobevent.FieldLevel, backupjobevent.FieldEventType, backupjobevent.FieldMessage, backupjobevent.FieldPayload: + values[i] = new(sql.NullString) + case backupjobevent.FieldEventTime, backupjobevent.FieldCreatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the BackupJobEvent fields. 
+func (_m *BackupJobEvent) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case backupjobevent.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int(value.Int64) + case backupjobevent.FieldBackupJobID: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field backup_job_id", values[i]) + } else if value.Valid { + _m.BackupJobID = int(value.Int64) + } + case backupjobevent.FieldLevel: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field level", values[i]) + } else if value.Valid { + _m.Level = backupjobevent.Level(value.String) + } + case backupjobevent.FieldEventType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field event_type", values[i]) + } else if value.Valid { + _m.EventType = value.String + } + case backupjobevent.FieldMessage: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field message", values[i]) + } else if value.Valid { + _m.Message = value.String + } + case backupjobevent.FieldPayload: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field payload", values[i]) + } else if value.Valid { + _m.Payload = value.String + } + case backupjobevent.FieldEventTime: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field event_time", values[i]) + } else if value.Valid { + _m.EventTime = value.Time + } + case backupjobevent.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } 
+ default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the BackupJobEvent. +// This includes values selected through modifiers, order, etc. +func (_m *BackupJobEvent) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// QueryJob queries the "job" edge of the BackupJobEvent entity. +func (_m *BackupJobEvent) QueryJob() *BackupJobQuery { + return NewBackupJobEventClient(_m.config).QueryJob(_m) +} + +// Update returns a builder for updating this BackupJobEvent. +// Note that you need to call BackupJobEvent.Unwrap() before calling this method if this BackupJobEvent +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *BackupJobEvent) Update() *BackupJobEventUpdateOne { + return NewBackupJobEventClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the BackupJobEvent entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *BackupJobEvent) Unwrap() *BackupJobEvent { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: BackupJobEvent is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *BackupJobEvent) String() string { + var builder strings.Builder + builder.WriteString("BackupJobEvent(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("backup_job_id=") + builder.WriteString(fmt.Sprintf("%v", _m.BackupJobID)) + builder.WriteString(", ") + builder.WriteString("level=") + builder.WriteString(fmt.Sprintf("%v", _m.Level)) + builder.WriteString(", ") + builder.WriteString("event_type=") + builder.WriteString(_m.EventType) + builder.WriteString(", ") + builder.WriteString("message=") + builder.WriteString(_m.Message) + builder.WriteString(", ") + builder.WriteString("payload=") + builder.WriteString(_m.Payload) + builder.WriteString(", ") + builder.WriteString("event_time=") + builder.WriteString(_m.EventTime.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// BackupJobEvents is a parsable slice of BackupJobEvent. +type BackupJobEvents []*BackupJobEvent diff --git a/backup/ent/backupjobevent/backupjobevent.go b/backup/ent/backupjobevent/backupjobevent.go new file mode 100644 index 000000000..7ea2f02e8 --- /dev/null +++ b/backup/ent/backupjobevent/backupjobevent.go @@ -0,0 +1,158 @@ +// Code generated by ent, DO NOT EDIT. + +package backupjobevent + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" +) + +const ( + // Label holds the string label denoting the backupjobevent type in the database. + Label = "backup_job_event" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldBackupJobID holds the string denoting the backup_job_id field in the database. + FieldBackupJobID = "backup_job_id" + // FieldLevel holds the string denoting the level field in the database. + FieldLevel = "level" + // FieldEventType holds the string denoting the event_type field in the database. 
+ FieldEventType = "event_type" + // FieldMessage holds the string denoting the message field in the database. + FieldMessage = "message" + // FieldPayload holds the string denoting the payload field in the database. + FieldPayload = "payload" + // FieldEventTime holds the string denoting the event_time field in the database. + FieldEventTime = "event_time" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // EdgeJob holds the string denoting the job edge name in mutations. + EdgeJob = "job" + // Table holds the table name of the backupjobevent in the database. + Table = "backup_job_events" + // JobTable is the table that holds the job relation/edge. + JobTable = "backup_job_events" + // JobInverseTable is the table name for the BackupJob entity. + // It exists in this package in order to avoid circular dependency with the "backupjob" package. + JobInverseTable = "backup_jobs" + // JobColumn is the table column denoting the job relation/edge. + JobColumn = "backup_job_id" +) + +// Columns holds all SQL columns for backupjobevent fields. +var Columns = []string{ + FieldID, + FieldBackupJobID, + FieldLevel, + FieldEventType, + FieldMessage, + FieldPayload, + FieldEventTime, + FieldCreatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultEventType holds the default value on creation for the "event_type" field. + DefaultEventType string + // DefaultEventTime holds the default value on creation for the "event_time" field. + DefaultEventTime func() time.Time + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time +) + +// Level defines the type for the "level" enum field. +type Level string + +// LevelInfo is the default value of the Level enum. 
+const DefaultLevel = LevelInfo + +// Level values. +const ( + LevelInfo Level = "info" + LevelWarning Level = "warning" + LevelError Level = "error" +) + +func (l Level) String() string { + return string(l) +} + +// LevelValidator is a validator for the "level" field enum values. It is called by the builders before save. +func LevelValidator(l Level) error { + switch l { + case LevelInfo, LevelWarning, LevelError: + return nil + default: + return fmt.Errorf("backupjobevent: invalid enum value for level field: %q", l) + } +} + +// OrderOption defines the ordering options for the BackupJobEvent queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByBackupJobID orders the results by the backup_job_id field. +func ByBackupJobID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBackupJobID, opts...).ToFunc() +} + +// ByLevel orders the results by the level field. +func ByLevel(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLevel, opts...).ToFunc() +} + +// ByEventType orders the results by the event_type field. +func ByEventType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEventType, opts...).ToFunc() +} + +// ByMessage orders the results by the message field. +func ByMessage(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMessage, opts...).ToFunc() +} + +// ByPayload orders the results by the payload field. +func ByPayload(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPayload, opts...).ToFunc() +} + +// ByEventTime orders the results by the event_time field. +func ByEventTime(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEventTime, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. 
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByJobField orders the results by job field. +func ByJobField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newJobStep(), sql.OrderByField(field, opts...)) + } +} +func newJobStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(JobInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, JobTable, JobColumn), + ) +} diff --git a/backup/ent/backupjobevent/where.go b/backup/ent/backupjobevent/where.go new file mode 100644 index 000000000..756e67841 --- /dev/null +++ b/backup/ent/backupjobevent/where.go @@ -0,0 +1,449 @@ +// Code generated by ent, DO NOT EDIT. + +package backupjobevent + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. 
+func IDGT(id int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLTE(FieldID, id)) +} + +// BackupJobID applies equality check predicate on the "backup_job_id" field. It's identical to BackupJobIDEQ. +func BackupJobID(v int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldBackupJobID, v)) +} + +// EventType applies equality check predicate on the "event_type" field. It's identical to EventTypeEQ. +func EventType(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldEventType, v)) +} + +// Message applies equality check predicate on the "message" field. It's identical to MessageEQ. +func Message(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldMessage, v)) +} + +// Payload applies equality check predicate on the "payload" field. It's identical to PayloadEQ. +func Payload(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldPayload, v)) +} + +// EventTime applies equality check predicate on the "event_time" field. It's identical to EventTimeEQ. +func EventTime(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldEventTime, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. 
+func CreatedAt(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldCreatedAt, v)) +} + +// BackupJobIDEQ applies the EQ predicate on the "backup_job_id" field. +func BackupJobIDEQ(v int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldBackupJobID, v)) +} + +// BackupJobIDNEQ applies the NEQ predicate on the "backup_job_id" field. +func BackupJobIDNEQ(v int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNEQ(FieldBackupJobID, v)) +} + +// BackupJobIDIn applies the In predicate on the "backup_job_id" field. +func BackupJobIDIn(vs ...int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldIn(FieldBackupJobID, vs...)) +} + +// BackupJobIDNotIn applies the NotIn predicate on the "backup_job_id" field. +func BackupJobIDNotIn(vs ...int) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNotIn(FieldBackupJobID, vs...)) +} + +// LevelEQ applies the EQ predicate on the "level" field. +func LevelEQ(v Level) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldLevel, v)) +} + +// LevelNEQ applies the NEQ predicate on the "level" field. +func LevelNEQ(v Level) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNEQ(FieldLevel, v)) +} + +// LevelIn applies the In predicate on the "level" field. +func LevelIn(vs ...Level) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldIn(FieldLevel, vs...)) +} + +// LevelNotIn applies the NotIn predicate on the "level" field. +func LevelNotIn(vs ...Level) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNotIn(FieldLevel, vs...)) +} + +// EventTypeEQ applies the EQ predicate on the "event_type" field. +func EventTypeEQ(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldEventType, v)) +} + +// EventTypeNEQ applies the NEQ predicate on the "event_type" field. 
+func EventTypeNEQ(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNEQ(FieldEventType, v)) +} + +// EventTypeIn applies the In predicate on the "event_type" field. +func EventTypeIn(vs ...string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldIn(FieldEventType, vs...)) +} + +// EventTypeNotIn applies the NotIn predicate on the "event_type" field. +func EventTypeNotIn(vs ...string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNotIn(FieldEventType, vs...)) +} + +// EventTypeGT applies the GT predicate on the "event_type" field. +func EventTypeGT(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGT(FieldEventType, v)) +} + +// EventTypeGTE applies the GTE predicate on the "event_type" field. +func EventTypeGTE(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGTE(FieldEventType, v)) +} + +// EventTypeLT applies the LT predicate on the "event_type" field. +func EventTypeLT(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLT(FieldEventType, v)) +} + +// EventTypeLTE applies the LTE predicate on the "event_type" field. +func EventTypeLTE(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLTE(FieldEventType, v)) +} + +// EventTypeContains applies the Contains predicate on the "event_type" field. +func EventTypeContains(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldContains(FieldEventType, v)) +} + +// EventTypeHasPrefix applies the HasPrefix predicate on the "event_type" field. +func EventTypeHasPrefix(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldHasPrefix(FieldEventType, v)) +} + +// EventTypeHasSuffix applies the HasSuffix predicate on the "event_type" field. 
+func EventTypeHasSuffix(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldHasSuffix(FieldEventType, v)) +} + +// EventTypeEqualFold applies the EqualFold predicate on the "event_type" field. +func EventTypeEqualFold(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEqualFold(FieldEventType, v)) +} + +// EventTypeContainsFold applies the ContainsFold predicate on the "event_type" field. +func EventTypeContainsFold(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldContainsFold(FieldEventType, v)) +} + +// MessageEQ applies the EQ predicate on the "message" field. +func MessageEQ(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldMessage, v)) +} + +// MessageNEQ applies the NEQ predicate on the "message" field. +func MessageNEQ(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNEQ(FieldMessage, v)) +} + +// MessageIn applies the In predicate on the "message" field. +func MessageIn(vs ...string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldIn(FieldMessage, vs...)) +} + +// MessageNotIn applies the NotIn predicate on the "message" field. +func MessageNotIn(vs ...string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNotIn(FieldMessage, vs...)) +} + +// MessageGT applies the GT predicate on the "message" field. +func MessageGT(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGT(FieldMessage, v)) +} + +// MessageGTE applies the GTE predicate on the "message" field. +func MessageGTE(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGTE(FieldMessage, v)) +} + +// MessageLT applies the LT predicate on the "message" field. +func MessageLT(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLT(FieldMessage, v)) +} + +// MessageLTE applies the LTE predicate on the "message" field. 
+func MessageLTE(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLTE(FieldMessage, v)) +} + +// MessageContains applies the Contains predicate on the "message" field. +func MessageContains(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldContains(FieldMessage, v)) +} + +// MessageHasPrefix applies the HasPrefix predicate on the "message" field. +func MessageHasPrefix(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldHasPrefix(FieldMessage, v)) +} + +// MessageHasSuffix applies the HasSuffix predicate on the "message" field. +func MessageHasSuffix(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldHasSuffix(FieldMessage, v)) +} + +// MessageEqualFold applies the EqualFold predicate on the "message" field. +func MessageEqualFold(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEqualFold(FieldMessage, v)) +} + +// MessageContainsFold applies the ContainsFold predicate on the "message" field. +func MessageContainsFold(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldContainsFold(FieldMessage, v)) +} + +// PayloadEQ applies the EQ predicate on the "payload" field. +func PayloadEQ(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldPayload, v)) +} + +// PayloadNEQ applies the NEQ predicate on the "payload" field. +func PayloadNEQ(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNEQ(FieldPayload, v)) +} + +// PayloadIn applies the In predicate on the "payload" field. +func PayloadIn(vs ...string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldIn(FieldPayload, vs...)) +} + +// PayloadNotIn applies the NotIn predicate on the "payload" field. 
+func PayloadNotIn(vs ...string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNotIn(FieldPayload, vs...)) +} + +// PayloadGT applies the GT predicate on the "payload" field. +func PayloadGT(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGT(FieldPayload, v)) +} + +// PayloadGTE applies the GTE predicate on the "payload" field. +func PayloadGTE(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGTE(FieldPayload, v)) +} + +// PayloadLT applies the LT predicate on the "payload" field. +func PayloadLT(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLT(FieldPayload, v)) +} + +// PayloadLTE applies the LTE predicate on the "payload" field. +func PayloadLTE(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLTE(FieldPayload, v)) +} + +// PayloadContains applies the Contains predicate on the "payload" field. +func PayloadContains(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldContains(FieldPayload, v)) +} + +// PayloadHasPrefix applies the HasPrefix predicate on the "payload" field. +func PayloadHasPrefix(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldHasPrefix(FieldPayload, v)) +} + +// PayloadHasSuffix applies the HasSuffix predicate on the "payload" field. +func PayloadHasSuffix(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldHasSuffix(FieldPayload, v)) +} + +// PayloadIsNil applies the IsNil predicate on the "payload" field. +func PayloadIsNil() predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldIsNull(FieldPayload)) +} + +// PayloadNotNil applies the NotNil predicate on the "payload" field. +func PayloadNotNil() predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNotNull(FieldPayload)) +} + +// PayloadEqualFold applies the EqualFold predicate on the "payload" field. 
+func PayloadEqualFold(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEqualFold(FieldPayload, v)) +} + +// PayloadContainsFold applies the ContainsFold predicate on the "payload" field. +func PayloadContainsFold(v string) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldContainsFold(FieldPayload, v)) +} + +// EventTimeEQ applies the EQ predicate on the "event_time" field. +func EventTimeEQ(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldEventTime, v)) +} + +// EventTimeNEQ applies the NEQ predicate on the "event_time" field. +func EventTimeNEQ(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNEQ(FieldEventTime, v)) +} + +// EventTimeIn applies the In predicate on the "event_time" field. +func EventTimeIn(vs ...time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldIn(FieldEventTime, vs...)) +} + +// EventTimeNotIn applies the NotIn predicate on the "event_time" field. +func EventTimeNotIn(vs ...time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNotIn(FieldEventTime, vs...)) +} + +// EventTimeGT applies the GT predicate on the "event_time" field. +func EventTimeGT(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGT(FieldEventTime, v)) +} + +// EventTimeGTE applies the GTE predicate on the "event_time" field. +func EventTimeGTE(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGTE(FieldEventTime, v)) +} + +// EventTimeLT applies the LT predicate on the "event_time" field. +func EventTimeLT(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLT(FieldEventTime, v)) +} + +// EventTimeLTE applies the LTE predicate on the "event_time" field. 
+func EventTimeLTE(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLTE(FieldEventTime, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.FieldLTE(FieldCreatedAt, v)) +} + +// HasJob applies the HasEdge predicate on the "job" edge. 
+func HasJob() predicate.BackupJobEvent { + return predicate.BackupJobEvent(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, JobTable, JobColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasJobWith applies the HasEdge predicate on the "job" edge with a given conditions (other predicates). +func HasJobWith(preds ...predicate.BackupJob) predicate.BackupJobEvent { + return predicate.BackupJobEvent(func(s *sql.Selector) { + step := newJobStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.BackupJobEvent) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.BackupJobEvent) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.BackupJobEvent) predicate.BackupJobEvent { + return predicate.BackupJobEvent(sql.NotPredicates(p)) +} diff --git a/backup/ent/backupjobevent_create.go b/backup/ent/backupjobevent_create.go new file mode 100644 index 000000000..6fc8c64f0 --- /dev/null +++ b/backup/ent/backupjobevent_create.go @@ -0,0 +1,354 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" +) + +// BackupJobEventCreate is the builder for creating a BackupJobEvent entity. +type BackupJobEventCreate struct { + config + mutation *BackupJobEventMutation + hooks []Hook +} + +// SetBackupJobID sets the "backup_job_id" field. 
+func (_c *BackupJobEventCreate) SetBackupJobID(v int) *BackupJobEventCreate { + _c.mutation.SetBackupJobID(v) + return _c +} + +// SetLevel sets the "level" field. +func (_c *BackupJobEventCreate) SetLevel(v backupjobevent.Level) *BackupJobEventCreate { + _c.mutation.SetLevel(v) + return _c +} + +// SetNillableLevel sets the "level" field if the given value is not nil. +func (_c *BackupJobEventCreate) SetNillableLevel(v *backupjobevent.Level) *BackupJobEventCreate { + if v != nil { + _c.SetLevel(*v) + } + return _c +} + +// SetEventType sets the "event_type" field. +func (_c *BackupJobEventCreate) SetEventType(v string) *BackupJobEventCreate { + _c.mutation.SetEventType(v) + return _c +} + +// SetNillableEventType sets the "event_type" field if the given value is not nil. +func (_c *BackupJobEventCreate) SetNillableEventType(v *string) *BackupJobEventCreate { + if v != nil { + _c.SetEventType(*v) + } + return _c +} + +// SetMessage sets the "message" field. +func (_c *BackupJobEventCreate) SetMessage(v string) *BackupJobEventCreate { + _c.mutation.SetMessage(v) + return _c +} + +// SetPayload sets the "payload" field. +func (_c *BackupJobEventCreate) SetPayload(v string) *BackupJobEventCreate { + _c.mutation.SetPayload(v) + return _c +} + +// SetNillablePayload sets the "payload" field if the given value is not nil. +func (_c *BackupJobEventCreate) SetNillablePayload(v *string) *BackupJobEventCreate { + if v != nil { + _c.SetPayload(*v) + } + return _c +} + +// SetEventTime sets the "event_time" field. +func (_c *BackupJobEventCreate) SetEventTime(v time.Time) *BackupJobEventCreate { + _c.mutation.SetEventTime(v) + return _c +} + +// SetNillableEventTime sets the "event_time" field if the given value is not nil. +func (_c *BackupJobEventCreate) SetNillableEventTime(v *time.Time) *BackupJobEventCreate { + if v != nil { + _c.SetEventTime(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. 
+func (_c *BackupJobEventCreate) SetCreatedAt(v time.Time) *BackupJobEventCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *BackupJobEventCreate) SetNillableCreatedAt(v *time.Time) *BackupJobEventCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetJobID sets the "job" edge to the BackupJob entity by ID. +func (_c *BackupJobEventCreate) SetJobID(id int) *BackupJobEventCreate { + _c.mutation.SetJobID(id) + return _c +} + +// SetJob sets the "job" edge to the BackupJob entity. +func (_c *BackupJobEventCreate) SetJob(v *BackupJob) *BackupJobEventCreate { + return _c.SetJobID(v.ID) +} + +// Mutation returns the BackupJobEventMutation object of the builder. +func (_c *BackupJobEventCreate) Mutation() *BackupJobEventMutation { + return _c.mutation +} + +// Save creates the BackupJobEvent in the database. +func (_c *BackupJobEventCreate) Save(ctx context.Context) (*BackupJobEvent, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *BackupJobEventCreate) SaveX(ctx context.Context) *BackupJobEvent { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *BackupJobEventCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *BackupJobEventCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (_c *BackupJobEventCreate) defaults() { + if _, ok := _c.mutation.Level(); !ok { + v := backupjobevent.DefaultLevel + _c.mutation.SetLevel(v) + } + if _, ok := _c.mutation.EventType(); !ok { + v := backupjobevent.DefaultEventType + _c.mutation.SetEventType(v) + } + if _, ok := _c.mutation.EventTime(); !ok { + v := backupjobevent.DefaultEventTime() + _c.mutation.SetEventTime(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := backupjobevent.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_c *BackupJobEventCreate) check() error { + if _, ok := _c.mutation.BackupJobID(); !ok { + return &ValidationError{Name: "backup_job_id", err: errors.New(`ent: missing required field "BackupJobEvent.backup_job_id"`)} + } + if _, ok := _c.mutation.Level(); !ok { + return &ValidationError{Name: "level", err: errors.New(`ent: missing required field "BackupJobEvent.level"`)} + } + if v, ok := _c.mutation.Level(); ok { + if err := backupjobevent.LevelValidator(v); err != nil { + return &ValidationError{Name: "level", err: fmt.Errorf(`ent: validator failed for field "BackupJobEvent.level": %w`, err)} + } + } + if _, ok := _c.mutation.EventType(); !ok { + return &ValidationError{Name: "event_type", err: errors.New(`ent: missing required field "BackupJobEvent.event_type"`)} + } + if _, ok := _c.mutation.Message(); !ok { + return &ValidationError{Name: "message", err: errors.New(`ent: missing required field "BackupJobEvent.message"`)} + } + if _, ok := _c.mutation.EventTime(); !ok { + return &ValidationError{Name: "event_time", err: errors.New(`ent: missing required field "BackupJobEvent.event_time"`)} + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "BackupJobEvent.created_at"`)} + } + if len(_c.mutation.JobIDs()) == 0 { + return &ValidationError{Name: "job", err: errors.New(`ent: missing required edge 
"BackupJobEvent.job"`)} + } + return nil +} + +func (_c *BackupJobEventCreate) sqlSave(ctx context.Context) (*BackupJobEvent, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *BackupJobEventCreate) createSpec() (*BackupJobEvent, *sqlgraph.CreateSpec) { + var ( + _node = &BackupJobEvent{config: _c.config} + _spec = sqlgraph.NewCreateSpec(backupjobevent.Table, sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt)) + ) + if value, ok := _c.mutation.Level(); ok { + _spec.SetField(backupjobevent.FieldLevel, field.TypeEnum, value) + _node.Level = value + } + if value, ok := _c.mutation.EventType(); ok { + _spec.SetField(backupjobevent.FieldEventType, field.TypeString, value) + _node.EventType = value + } + if value, ok := _c.mutation.Message(); ok { + _spec.SetField(backupjobevent.FieldMessage, field.TypeString, value) + _node.Message = value + } + if value, ok := _c.mutation.Payload(); ok { + _spec.SetField(backupjobevent.FieldPayload, field.TypeString, value) + _node.Payload = value + } + if value, ok := _c.mutation.EventTime(); ok { + _spec.SetField(backupjobevent.FieldEventTime, field.TypeTime, value) + _node.EventTime = value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(backupjobevent.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if nodes := _c.mutation.JobIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: backupjobevent.JobTable, + Columns: []string{backupjobevent.JobColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjob.FieldID, 
field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.BackupJobID = nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// BackupJobEventCreateBulk is the builder for creating many BackupJobEvent entities in bulk. +type BackupJobEventCreateBulk struct { + config + err error + builders []*BackupJobEventCreate +} + +// Save creates the BackupJobEvent entities in the database. +func (_c *BackupJobEventCreateBulk) Save(ctx context.Context) ([]*BackupJobEvent, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*BackupJobEvent, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BackupJobEventMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *BackupJobEventCreateBulk) SaveX(ctx context.Context) []*BackupJobEvent { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *BackupJobEventCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *BackupJobEventCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backup/ent/backupjobevent_delete.go b/backup/ent/backupjobevent_delete.go new file mode 100644 index 000000000..55c0ef9bd --- /dev/null +++ b/backup/ent/backupjobevent_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupJobEventDelete is the builder for deleting a BackupJobEvent entity. +type BackupJobEventDelete struct { + config + hooks []Hook + mutation *BackupJobEventMutation +} + +// Where appends a list predicates to the BackupJobEventDelete builder. 
+func (_d *BackupJobEventDelete) Where(ps ...predicate.BackupJobEvent) *BackupJobEventDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *BackupJobEventDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *BackupJobEventDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *BackupJobEventDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(backupjobevent.Table, sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// BackupJobEventDeleteOne is the builder for deleting a single BackupJobEvent entity. +type BackupJobEventDeleteOne struct { + _d *BackupJobEventDelete +} + +// Where appends a list predicates to the BackupJobEventDelete builder. +func (_d *BackupJobEventDeleteOne) Where(ps ...predicate.BackupJobEvent) *BackupJobEventDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *BackupJobEventDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{backupjobevent.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *BackupJobEventDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backup/ent/backupjobevent_query.go b/backup/ent/backupjobevent_query.go new file mode 100644 index 000000000..6f4b512ff --- /dev/null +++ b/backup/ent/backupjobevent_query.go @@ -0,0 +1,606 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupJobEventQuery is the builder for querying BackupJobEvent entities. +type BackupJobEventQuery struct { + config + ctx *QueryContext + order []backupjobevent.OrderOption + inters []Interceptor + predicates []predicate.BackupJobEvent + withJob *BackupJobQuery + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the BackupJobEventQuery builder. +func (_q *BackupJobEventQuery) Where(ps ...predicate.BackupJobEvent) *BackupJobEventQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *BackupJobEventQuery) Limit(limit int) *BackupJobEventQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *BackupJobEventQuery) Offset(offset int) *BackupJobEventQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *BackupJobEventQuery) Unique(unique bool) *BackupJobEventQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. 
+func (_q *BackupJobEventQuery) Order(o ...backupjobevent.OrderOption) *BackupJobEventQuery { + _q.order = append(_q.order, o...) + return _q +} + +// QueryJob chains the current query on the "job" edge. +func (_q *BackupJobEventQuery) QueryJob() *BackupJobQuery { + query := (&BackupJobClient{config: _q.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + selector := _q.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(backupjobevent.Table, backupjobevent.FieldID, selector), + sqlgraph.To(backupjob.Table, backupjob.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, backupjobevent.JobTable, backupjobevent.JobColumn), + ) + fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first BackupJobEvent entity from the query. +// Returns a *NotFoundError when no BackupJobEvent was found. +func (_q *BackupJobEventQuery) First(ctx context.Context) (*BackupJobEvent, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{backupjobevent.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *BackupJobEventQuery) FirstX(ctx context.Context) *BackupJobEvent { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first BackupJobEvent ID from the query. +// Returns a *NotFoundError when no BackupJobEvent ID was found. 
+func (_q *BackupJobEventQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{backupjobevent.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *BackupJobEventQuery) FirstIDX(ctx context.Context) int { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single BackupJobEvent entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one BackupJobEvent entity is found. +// Returns a *NotFoundError when no BackupJobEvent entities are found. +func (_q *BackupJobEventQuery) Only(ctx context.Context) (*BackupJobEvent, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{backupjobevent.Label} + default: + return nil, &NotSingularError{backupjobevent.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *BackupJobEventQuery) OnlyX(ctx context.Context) *BackupJobEvent { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only BackupJobEvent ID in the query. +// Returns a *NotSingularError when more than one BackupJobEvent ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (_q *BackupJobEventQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{backupjobevent.Label} + default: + err = &NotSingularError{backupjobevent.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *BackupJobEventQuery) OnlyIDX(ctx context.Context) int { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of BackupJobEvents. +func (_q *BackupJobEventQuery) All(ctx context.Context) ([]*BackupJobEvent, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*BackupJobEvent, *BackupJobEventQuery]() + return withInterceptors[[]*BackupJobEvent](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *BackupJobEventQuery) AllX(ctx context.Context) []*BackupJobEvent { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of BackupJobEvent IDs. +func (_q *BackupJobEventQuery) IDs(ctx context.Context) (ids []int, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(backupjobevent.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *BackupJobEventQuery) IDsX(ctx context.Context) []int { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (_q *BackupJobEventQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*BackupJobEventQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *BackupJobEventQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *BackupJobEventQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *BackupJobEventQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the BackupJobEventQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *BackupJobEventQuery) Clone() *BackupJobEventQuery { + if _q == nil { + return nil + } + return &BackupJobEventQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]backupjobevent.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.BackupJobEvent{}, _q.predicates...), + withJob: _q.withJob.Clone(), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// WithJob tells the query-builder to eager-load the nodes that are connected to +// the "job" edge. The optional arguments are used to configure the query builder of the edge. 
+func (_q *BackupJobEventQuery) WithJob(opts ...func(*BackupJobQuery)) *BackupJobEventQuery { + query := (&BackupJobClient{config: _q.config}).Query() + for _, opt := range opts { + opt(query) + } + _q.withJob = query + return _q +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// BackupJobID int `json:"backup_job_id,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.BackupJobEvent.Query(). +// GroupBy(backupjobevent.FieldBackupJobID). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *BackupJobEventQuery) GroupBy(field string, fields ...string) *BackupJobEventGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &BackupJobEventGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = backupjobevent.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// BackupJobID int `json:"backup_job_id,omitempty"` +// } +// +// client.BackupJobEvent.Query(). +// Select(backupjobevent.FieldBackupJobID). +// Scan(ctx, &v) +func (_q *BackupJobEventQuery) Select(fields ...string) *BackupJobEventSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &BackupJobEventSelect{BackupJobEventQuery: _q} + sbuild.label = backupjobevent.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a BackupJobEventSelect configured with the given aggregations. +func (_q *BackupJobEventQuery) Aggregate(fns ...AggregateFunc) *BackupJobEventSelect { + return _q.Select().Aggregate(fns...) 
+} + +func (_q *BackupJobEventQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !backupjobevent.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *BackupJobEventQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BackupJobEvent, error) { + var ( + nodes = []*BackupJobEvent{} + _spec = _q.querySpec() + loadedTypes = [1]bool{ + _q.withJob != nil, + } + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*BackupJobEvent).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &BackupJobEvent{config: _q.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := _q.withJob; query != nil { + if err := _q.loadJob(ctx, query, nodes, nil, + func(n *BackupJobEvent, e *BackupJob) { n.Edges.Job = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (_q *BackupJobEventQuery) loadJob(ctx context.Context, query *BackupJobQuery, nodes []*BackupJobEvent, init func(*BackupJobEvent), assign func(*BackupJobEvent, *BackupJob)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*BackupJobEvent) + for i := range nodes { + fk := nodes[i].BackupJobID + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + 
nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(backupjob.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "backup_job_id" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (_q *BackupJobEventQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *BackupJobEventQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(backupjobevent.Table, backupjobevent.Columns, sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, backupjobevent.FieldID) + for i := range fields { + if fields[i] != backupjobevent.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + if _q.withJob != nil { + _spec.Node.AddColumnOnce(backupjobevent.FieldBackupJobID) + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *BackupJobEventQuery) sqlQuery(ctx context.Context) *sql.Selector { + 
builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(backupjobevent.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = backupjobevent.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// BackupJobEventGroupBy is the group-by builder for BackupJobEvent entities. +type BackupJobEventGroupBy struct { + selector + build *BackupJobEventQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *BackupJobEventGroupBy) Aggregate(fns ...AggregateFunc) *BackupJobEventGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *BackupJobEventGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*BackupJobEventQuery, *BackupJobEventGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *BackupJobEventGroupBy) sqlScan(ctx context.Context, root *BackupJobEventQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// BackupJobEventSelect is the builder for selecting fields of BackupJobEvent entities. +type BackupJobEventSelect struct { + *BackupJobEventQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *BackupJobEventSelect) Aggregate(fns ...AggregateFunc) *BackupJobEventSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *BackupJobEventSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*BackupJobEventQuery, *BackupJobEventSelect](ctx, _s.BackupJobEventQuery, _s, _s.inters, v) +} + +func (_s *BackupJobEventSelect) sqlScan(ctx context.Context, root *BackupJobEventQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backup/ent/backupjobevent_update.go b/backup/ent/backupjobevent_update.go new file mode 100644 index 000000000..937e6f11f --- /dev/null +++ b/backup/ent/backupjobevent_update.go @@ -0,0 +1,517 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupJobEventUpdate is the builder for updating BackupJobEvent entities. +type BackupJobEventUpdate struct { + config + hooks []Hook + mutation *BackupJobEventMutation +} + +// Where appends a list predicates to the BackupJobEventUpdate builder. +func (_u *BackupJobEventUpdate) Where(ps ...predicate.BackupJobEvent) *BackupJobEventUpdate { + _u.mutation.Where(ps...) 
+ return _u +} + +// SetBackupJobID sets the "backup_job_id" field. +func (_u *BackupJobEventUpdate) SetBackupJobID(v int) *BackupJobEventUpdate { + _u.mutation.SetBackupJobID(v) + return _u +} + +// SetNillableBackupJobID sets the "backup_job_id" field if the given value is not nil. +func (_u *BackupJobEventUpdate) SetNillableBackupJobID(v *int) *BackupJobEventUpdate { + if v != nil { + _u.SetBackupJobID(*v) + } + return _u +} + +// SetLevel sets the "level" field. +func (_u *BackupJobEventUpdate) SetLevel(v backupjobevent.Level) *BackupJobEventUpdate { + _u.mutation.SetLevel(v) + return _u +} + +// SetNillableLevel sets the "level" field if the given value is not nil. +func (_u *BackupJobEventUpdate) SetNillableLevel(v *backupjobevent.Level) *BackupJobEventUpdate { + if v != nil { + _u.SetLevel(*v) + } + return _u +} + +// SetEventType sets the "event_type" field. +func (_u *BackupJobEventUpdate) SetEventType(v string) *BackupJobEventUpdate { + _u.mutation.SetEventType(v) + return _u +} + +// SetNillableEventType sets the "event_type" field if the given value is not nil. +func (_u *BackupJobEventUpdate) SetNillableEventType(v *string) *BackupJobEventUpdate { + if v != nil { + _u.SetEventType(*v) + } + return _u +} + +// SetMessage sets the "message" field. +func (_u *BackupJobEventUpdate) SetMessage(v string) *BackupJobEventUpdate { + _u.mutation.SetMessage(v) + return _u +} + +// SetNillableMessage sets the "message" field if the given value is not nil. +func (_u *BackupJobEventUpdate) SetNillableMessage(v *string) *BackupJobEventUpdate { + if v != nil { + _u.SetMessage(*v) + } + return _u +} + +// SetPayload sets the "payload" field. +func (_u *BackupJobEventUpdate) SetPayload(v string) *BackupJobEventUpdate { + _u.mutation.SetPayload(v) + return _u +} + +// SetNillablePayload sets the "payload" field if the given value is not nil. 
+func (_u *BackupJobEventUpdate) SetNillablePayload(v *string) *BackupJobEventUpdate { + if v != nil { + _u.SetPayload(*v) + } + return _u +} + +// ClearPayload clears the value of the "payload" field. +func (_u *BackupJobEventUpdate) ClearPayload() *BackupJobEventUpdate { + _u.mutation.ClearPayload() + return _u +} + +// SetEventTime sets the "event_time" field. +func (_u *BackupJobEventUpdate) SetEventTime(v time.Time) *BackupJobEventUpdate { + _u.mutation.SetEventTime(v) + return _u +} + +// SetNillableEventTime sets the "event_time" field if the given value is not nil. +func (_u *BackupJobEventUpdate) SetNillableEventTime(v *time.Time) *BackupJobEventUpdate { + if v != nil { + _u.SetEventTime(*v) + } + return _u +} + +// SetJobID sets the "job" edge to the BackupJob entity by ID. +func (_u *BackupJobEventUpdate) SetJobID(id int) *BackupJobEventUpdate { + _u.mutation.SetJobID(id) + return _u +} + +// SetJob sets the "job" edge to the BackupJob entity. +func (_u *BackupJobEventUpdate) SetJob(v *BackupJob) *BackupJobEventUpdate { + return _u.SetJobID(v.ID) +} + +// Mutation returns the BackupJobEventMutation object of the builder. +func (_u *BackupJobEventUpdate) Mutation() *BackupJobEventMutation { + return _u.mutation +} + +// ClearJob clears the "job" edge to the BackupJob entity. +func (_u *BackupJobEventUpdate) ClearJob() *BackupJobEventUpdate { + _u.mutation.ClearJob() + return _u +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *BackupJobEventUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *BackupJobEventUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. 
+func (_u *BackupJobEventUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *BackupJobEventUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *BackupJobEventUpdate) check() error { + if v, ok := _u.mutation.Level(); ok { + if err := backupjobevent.LevelValidator(v); err != nil { + return &ValidationError{Name: "level", err: fmt.Errorf(`ent: validator failed for field "BackupJobEvent.level": %w`, err)} + } + } + if _u.mutation.JobCleared() && len(_u.mutation.JobIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "BackupJobEvent.job"`) + } + return nil +} + +func (_u *BackupJobEventUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(backupjobevent.Table, backupjobevent.Columns, sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Level(); ok { + _spec.SetField(backupjobevent.FieldLevel, field.TypeEnum, value) + } + if value, ok := _u.mutation.EventType(); ok { + _spec.SetField(backupjobevent.FieldEventType, field.TypeString, value) + } + if value, ok := _u.mutation.Message(); ok { + _spec.SetField(backupjobevent.FieldMessage, field.TypeString, value) + } + if value, ok := _u.mutation.Payload(); ok { + _spec.SetField(backupjobevent.FieldPayload, field.TypeString, value) + } + if _u.mutation.PayloadCleared() { + _spec.ClearField(backupjobevent.FieldPayload, field.TypeString) + } + if value, ok := _u.mutation.EventTime(); ok { + _spec.SetField(backupjobevent.FieldEventTime, field.TypeTime, value) + } + if _u.mutation.JobCleared() { + edge 
:= &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: backupjobevent.JobTable, + Columns: []string{backupjobevent.JobColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.JobIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: backupjobevent.JobTable, + Columns: []string{backupjobevent.JobColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{backupjobevent.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// BackupJobEventUpdateOne is the builder for updating a single BackupJobEvent entity. +type BackupJobEventUpdateOne struct { + config + fields []string + hooks []Hook + mutation *BackupJobEventMutation +} + +// SetBackupJobID sets the "backup_job_id" field. +func (_u *BackupJobEventUpdateOne) SetBackupJobID(v int) *BackupJobEventUpdateOne { + _u.mutation.SetBackupJobID(v) + return _u +} + +// SetNillableBackupJobID sets the "backup_job_id" field if the given value is not nil. +func (_u *BackupJobEventUpdateOne) SetNillableBackupJobID(v *int) *BackupJobEventUpdateOne { + if v != nil { + _u.SetBackupJobID(*v) + } + return _u +} + +// SetLevel sets the "level" field. 
+func (_u *BackupJobEventUpdateOne) SetLevel(v backupjobevent.Level) *BackupJobEventUpdateOne { + _u.mutation.SetLevel(v) + return _u +} + +// SetNillableLevel sets the "level" field if the given value is not nil. +func (_u *BackupJobEventUpdateOne) SetNillableLevel(v *backupjobevent.Level) *BackupJobEventUpdateOne { + if v != nil { + _u.SetLevel(*v) + } + return _u +} + +// SetEventType sets the "event_type" field. +func (_u *BackupJobEventUpdateOne) SetEventType(v string) *BackupJobEventUpdateOne { + _u.mutation.SetEventType(v) + return _u +} + +// SetNillableEventType sets the "event_type" field if the given value is not nil. +func (_u *BackupJobEventUpdateOne) SetNillableEventType(v *string) *BackupJobEventUpdateOne { + if v != nil { + _u.SetEventType(*v) + } + return _u +} + +// SetMessage sets the "message" field. +func (_u *BackupJobEventUpdateOne) SetMessage(v string) *BackupJobEventUpdateOne { + _u.mutation.SetMessage(v) + return _u +} + +// SetNillableMessage sets the "message" field if the given value is not nil. +func (_u *BackupJobEventUpdateOne) SetNillableMessage(v *string) *BackupJobEventUpdateOne { + if v != nil { + _u.SetMessage(*v) + } + return _u +} + +// SetPayload sets the "payload" field. +func (_u *BackupJobEventUpdateOne) SetPayload(v string) *BackupJobEventUpdateOne { + _u.mutation.SetPayload(v) + return _u +} + +// SetNillablePayload sets the "payload" field if the given value is not nil. +func (_u *BackupJobEventUpdateOne) SetNillablePayload(v *string) *BackupJobEventUpdateOne { + if v != nil { + _u.SetPayload(*v) + } + return _u +} + +// ClearPayload clears the value of the "payload" field. +func (_u *BackupJobEventUpdateOne) ClearPayload() *BackupJobEventUpdateOne { + _u.mutation.ClearPayload() + return _u +} + +// SetEventTime sets the "event_time" field. 
+func (_u *BackupJobEventUpdateOne) SetEventTime(v time.Time) *BackupJobEventUpdateOne { + _u.mutation.SetEventTime(v) + return _u +} + +// SetNillableEventTime sets the "event_time" field if the given value is not nil. +func (_u *BackupJobEventUpdateOne) SetNillableEventTime(v *time.Time) *BackupJobEventUpdateOne { + if v != nil { + _u.SetEventTime(*v) + } + return _u +} + +// SetJobID sets the "job" edge to the BackupJob entity by ID. +func (_u *BackupJobEventUpdateOne) SetJobID(id int) *BackupJobEventUpdateOne { + _u.mutation.SetJobID(id) + return _u +} + +// SetJob sets the "job" edge to the BackupJob entity. +func (_u *BackupJobEventUpdateOne) SetJob(v *BackupJob) *BackupJobEventUpdateOne { + return _u.SetJobID(v.ID) +} + +// Mutation returns the BackupJobEventMutation object of the builder. +func (_u *BackupJobEventUpdateOne) Mutation() *BackupJobEventMutation { + return _u.mutation +} + +// ClearJob clears the "job" edge to the BackupJob entity. +func (_u *BackupJobEventUpdateOne) ClearJob() *BackupJobEventUpdateOne { + _u.mutation.ClearJob() + return _u +} + +// Where appends a list predicates to the BackupJobEventUpdate builder. +func (_u *BackupJobEventUpdateOne) Where(ps ...predicate.BackupJobEvent) *BackupJobEventUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *BackupJobEventUpdateOne) Select(field string, fields ...string) *BackupJobEventUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated BackupJobEvent entity. +func (_u *BackupJobEventUpdateOne) Save(ctx context.Context) (*BackupJobEvent, error) { + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_u *BackupJobEventUpdateOne) SaveX(ctx context.Context) *BackupJobEvent { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *BackupJobEventUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *BackupJobEventUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *BackupJobEventUpdateOne) check() error { + if v, ok := _u.mutation.Level(); ok { + if err := backupjobevent.LevelValidator(v); err != nil { + return &ValidationError{Name: "level", err: fmt.Errorf(`ent: validator failed for field "BackupJobEvent.level": %w`, err)} + } + } + if _u.mutation.JobCleared() && len(_u.mutation.JobIDs()) > 0 { + return errors.New(`ent: clearing a required unique edge "BackupJobEvent.job"`) + } + return nil +} + +func (_u *BackupJobEventUpdateOne) sqlSave(ctx context.Context) (_node *BackupJobEvent, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(backupjobevent.Table, backupjobevent.Columns, sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BackupJobEvent.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, backupjobevent.FieldID) + for _, f := range fields { + if !backupjobevent.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != backupjobevent.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + 
_spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Level(); ok { + _spec.SetField(backupjobevent.FieldLevel, field.TypeEnum, value) + } + if value, ok := _u.mutation.EventType(); ok { + _spec.SetField(backupjobevent.FieldEventType, field.TypeString, value) + } + if value, ok := _u.mutation.Message(); ok { + _spec.SetField(backupjobevent.FieldMessage, field.TypeString, value) + } + if value, ok := _u.mutation.Payload(); ok { + _spec.SetField(backupjobevent.FieldPayload, field.TypeString, value) + } + if _u.mutation.PayloadCleared() { + _spec.ClearField(backupjobevent.FieldPayload, field.TypeString) + } + if value, ok := _u.mutation.EventTime(); ok { + _spec.SetField(backupjobevent.FieldEventTime, field.TypeTime, value) + } + if _u.mutation.JobCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: backupjobevent.JobTable, + Columns: []string{backupjobevent.JobColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := _u.mutation.JobIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: backupjobevent.JobTable, + Columns: []string{backupjobevent.JobColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &BackupJobEvent{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{backupjobevent.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: 
err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backup/ent/backups3config.go b/backup/ent/backups3config.go new file mode 100644 index 000000000..2678d3b34 --- /dev/null +++ b/backup/ent/backups3config.go @@ -0,0 +1,217 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" +) + +// BackupS3Config is the model entity for the BackupS3Config schema. +type BackupS3Config struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // Enabled holds the value of the "enabled" field. + Enabled bool `json:"enabled,omitempty"` + // Endpoint holds the value of the "endpoint" field. + Endpoint string `json:"endpoint,omitempty"` + // Region holds the value of the "region" field. + Region string `json:"region,omitempty"` + // Bucket holds the value of the "bucket" field. + Bucket string `json:"bucket,omitempty"` + // AccessKeyID holds the value of the "access_key_id" field. + AccessKeyID string `json:"access_key_id,omitempty"` + // SecretAccessKeyEncrypted holds the value of the "secret_access_key_encrypted" field. + SecretAccessKeyEncrypted string `json:"-"` + // Prefix holds the value of the "prefix" field. + Prefix string `json:"prefix,omitempty"` + // ForcePathStyle holds the value of the "force_path_style" field. + ForcePathStyle bool `json:"force_path_style,omitempty"` + // UseSsl holds the value of the "use_ssl" field. + UseSsl bool `json:"use_ssl,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*BackupS3Config) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case backups3config.FieldEnabled, backups3config.FieldForcePathStyle, backups3config.FieldUseSsl: + values[i] = new(sql.NullBool) + case backups3config.FieldID: + values[i] = new(sql.NullInt64) + case backups3config.FieldEndpoint, backups3config.FieldRegion, backups3config.FieldBucket, backups3config.FieldAccessKeyID, backups3config.FieldSecretAccessKeyEncrypted, backups3config.FieldPrefix: + values[i] = new(sql.NullString) + case backups3config.FieldCreatedAt, backups3config.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the BackupS3Config fields. +func (_m *BackupS3Config) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case backups3config.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int(value.Int64) + case backups3config.FieldEnabled: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field enabled", values[i]) + } else if value.Valid { + _m.Enabled = value.Bool + } + case backups3config.FieldEndpoint: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field endpoint", values[i]) + } else if value.Valid { + _m.Endpoint = value.String + } + case backups3config.FieldRegion: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field region", values[i]) + } else if value.Valid { + _m.Region = value.String + } + case 
backups3config.FieldBucket: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field bucket", values[i]) + } else if value.Valid { + _m.Bucket = value.String + } + case backups3config.FieldAccessKeyID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field access_key_id", values[i]) + } else if value.Valid { + _m.AccessKeyID = value.String + } + case backups3config.FieldSecretAccessKeyEncrypted: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field secret_access_key_encrypted", values[i]) + } else if value.Valid { + _m.SecretAccessKeyEncrypted = value.String + } + case backups3config.FieldPrefix: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field prefix", values[i]) + } else if value.Valid { + _m.Prefix = value.String + } + case backups3config.FieldForcePathStyle: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field force_path_style", values[i]) + } else if value.Valid { + _m.ForcePathStyle = value.Bool + } + case backups3config.FieldUseSsl: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field use_ssl", values[i]) + } else if value.Valid { + _m.UseSsl = value.Bool + } + case backups3config.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case backups3config.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the BackupS3Config. 
+// This includes values selected through modifiers, order, etc. +func (_m *BackupS3Config) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this BackupS3Config. +// Note that you need to call BackupS3Config.Unwrap() before calling this method if this BackupS3Config +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *BackupS3Config) Update() *BackupS3ConfigUpdateOne { + return NewBackupS3ConfigClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the BackupS3Config entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *BackupS3Config) Unwrap() *BackupS3Config { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: BackupS3Config is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *BackupS3Config) String() string { + var builder strings.Builder + builder.WriteString("BackupS3Config(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("enabled=") + builder.WriteString(fmt.Sprintf("%v", _m.Enabled)) + builder.WriteString(", ") + builder.WriteString("endpoint=") + builder.WriteString(_m.Endpoint) + builder.WriteString(", ") + builder.WriteString("region=") + builder.WriteString(_m.Region) + builder.WriteString(", ") + builder.WriteString("bucket=") + builder.WriteString(_m.Bucket) + builder.WriteString(", ") + builder.WriteString("access_key_id=") + builder.WriteString(_m.AccessKeyID) + builder.WriteString(", ") + builder.WriteString("secret_access_key_encrypted=") + builder.WriteString(", ") + builder.WriteString("prefix=") + builder.WriteString(_m.Prefix) + builder.WriteString(", ") + builder.WriteString("force_path_style=") + builder.WriteString(fmt.Sprintf("%v", _m.ForcePathStyle)) + builder.WriteString(", ") + builder.WriteString("use_ssl=") + builder.WriteString(fmt.Sprintf("%v", _m.UseSsl)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// BackupS3Configs is a parsable slice of BackupS3Config. +type BackupS3Configs []*BackupS3Config diff --git a/backup/ent/backups3config/backups3config.go b/backup/ent/backups3config/backups3config.go new file mode 100644 index 000000000..b93dbbe8b --- /dev/null +++ b/backup/ent/backups3config/backups3config.go @@ -0,0 +1,154 @@ +// Code generated by ent, DO NOT EDIT. + +package backups3config + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the backups3config type in the database. 
+ Label = "backup_s3config" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldEnabled holds the string denoting the enabled field in the database. + FieldEnabled = "enabled" + // FieldEndpoint holds the string denoting the endpoint field in the database. + FieldEndpoint = "endpoint" + // FieldRegion holds the string denoting the region field in the database. + FieldRegion = "region" + // FieldBucket holds the string denoting the bucket field in the database. + FieldBucket = "bucket" + // FieldAccessKeyID holds the string denoting the access_key_id field in the database. + FieldAccessKeyID = "access_key_id" + // FieldSecretAccessKeyEncrypted holds the string denoting the secret_access_key_encrypted field in the database. + FieldSecretAccessKeyEncrypted = "secret_access_key_encrypted" + // FieldPrefix holds the string denoting the prefix field in the database. + FieldPrefix = "prefix" + // FieldForcePathStyle holds the string denoting the force_path_style field in the database. + FieldForcePathStyle = "force_path_style" + // FieldUseSsl holds the string denoting the use_ssl field in the database. + FieldUseSsl = "use_ssl" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // Table holds the table name of the backups3config in the database. + Table = "backup_s3configs" +) + +// Columns holds all SQL columns for backups3config fields. +var Columns = []string{ + FieldID, + FieldEnabled, + FieldEndpoint, + FieldRegion, + FieldBucket, + FieldAccessKeyID, + FieldSecretAccessKeyEncrypted, + FieldPrefix, + FieldForcePathStyle, + FieldUseSsl, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). 
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultEnabled holds the default value on creation for the "enabled" field. + DefaultEnabled bool + // DefaultEndpoint holds the default value on creation for the "endpoint" field. + DefaultEndpoint string + // DefaultRegion holds the default value on creation for the "region" field. + DefaultRegion string + // DefaultBucket holds the default value on creation for the "bucket" field. + DefaultBucket string + // DefaultAccessKeyID holds the default value on creation for the "access_key_id" field. + DefaultAccessKeyID string + // DefaultPrefix holds the default value on creation for the "prefix" field. + DefaultPrefix string + // DefaultForcePathStyle holds the default value on creation for the "force_path_style" field. + DefaultForcePathStyle bool + // DefaultUseSsl holds the default value on creation for the "use_ssl" field. + DefaultUseSsl bool + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// OrderOption defines the ordering options for the BackupS3Config queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByEnabled orders the results by the enabled field. +func ByEnabled(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEnabled, opts...).ToFunc() +} + +// ByEndpoint orders the results by the endpoint field. 
+func ByEndpoint(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldEndpoint, opts...).ToFunc() +} + +// ByRegion orders the results by the region field. +func ByRegion(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRegion, opts...).ToFunc() +} + +// ByBucket orders the results by the bucket field. +func ByBucket(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBucket, opts...).ToFunc() +} + +// ByAccessKeyID orders the results by the access_key_id field. +func ByAccessKeyID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAccessKeyID, opts...).ToFunc() +} + +// BySecretAccessKeyEncrypted orders the results by the secret_access_key_encrypted field. +func BySecretAccessKeyEncrypted(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSecretAccessKeyEncrypted, opts...).ToFunc() +} + +// ByPrefix orders the results by the prefix field. +func ByPrefix(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPrefix, opts...).ToFunc() +} + +// ByForcePathStyle orders the results by the force_path_style field. +func ByForcePathStyle(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldForcePathStyle, opts...).ToFunc() +} + +// ByUseSsl orders the results by the use_ssl field. +func ByUseSsl(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUseSsl, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. 
+func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} diff --git a/backup/ent/backups3config/where.go b/backup/ent/backups3config/where.go new file mode 100644 index 000000000..8c3cf953a --- /dev/null +++ b/backup/ent/backups3config/where.go @@ -0,0 +1,635 @@ +// Code generated by ent, DO NOT EDIT. + +package backups3config + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. 
+func IDLTE(id int) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldID, id)) +} + +// Enabled applies equality check predicate on the "enabled" field. It's identical to EnabledEQ. +func Enabled(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldEnabled, v)) +} + +// Endpoint applies equality check predicate on the "endpoint" field. It's identical to EndpointEQ. +func Endpoint(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldEndpoint, v)) +} + +// Region applies equality check predicate on the "region" field. It's identical to RegionEQ. +func Region(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldRegion, v)) +} + +// Bucket applies equality check predicate on the "bucket" field. It's identical to BucketEQ. +func Bucket(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldBucket, v)) +} + +// AccessKeyID applies equality check predicate on the "access_key_id" field. It's identical to AccessKeyIDEQ. +func AccessKeyID(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldAccessKeyID, v)) +} + +// SecretAccessKeyEncrypted applies equality check predicate on the "secret_access_key_encrypted" field. It's identical to SecretAccessKeyEncryptedEQ. +func SecretAccessKeyEncrypted(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldSecretAccessKeyEncrypted, v)) +} + +// Prefix applies equality check predicate on the "prefix" field. It's identical to PrefixEQ. +func Prefix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldPrefix, v)) +} + +// ForcePathStyle applies equality check predicate on the "force_path_style" field. It's identical to ForcePathStyleEQ. 
+func ForcePathStyle(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldForcePathStyle, v)) +} + +// UseSsl applies equality check predicate on the "use_ssl" field. It's identical to UseSslEQ. +func UseSsl(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldUseSsl, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// EnabledEQ applies the EQ predicate on the "enabled" field. +func EnabledEQ(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldEnabled, v)) +} + +// EnabledNEQ applies the NEQ predicate on the "enabled" field. +func EnabledNEQ(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldEnabled, v)) +} + +// EndpointEQ applies the EQ predicate on the "endpoint" field. +func EndpointEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldEndpoint, v)) +} + +// EndpointNEQ applies the NEQ predicate on the "endpoint" field. +func EndpointNEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldEndpoint, v)) +} + +// EndpointIn applies the In predicate on the "endpoint" field. +func EndpointIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldEndpoint, vs...)) +} + +// EndpointNotIn applies the NotIn predicate on the "endpoint" field. 
+func EndpointNotIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldEndpoint, vs...)) +} + +// EndpointGT applies the GT predicate on the "endpoint" field. +func EndpointGT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldEndpoint, v)) +} + +// EndpointGTE applies the GTE predicate on the "endpoint" field. +func EndpointGTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldEndpoint, v)) +} + +// EndpointLT applies the LT predicate on the "endpoint" field. +func EndpointLT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldEndpoint, v)) +} + +// EndpointLTE applies the LTE predicate on the "endpoint" field. +func EndpointLTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldEndpoint, v)) +} + +// EndpointContains applies the Contains predicate on the "endpoint" field. +func EndpointContains(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContains(FieldEndpoint, v)) +} + +// EndpointHasPrefix applies the HasPrefix predicate on the "endpoint" field. +func EndpointHasPrefix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasPrefix(FieldEndpoint, v)) +} + +// EndpointHasSuffix applies the HasSuffix predicate on the "endpoint" field. +func EndpointHasSuffix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasSuffix(FieldEndpoint, v)) +} + +// EndpointEqualFold applies the EqualFold predicate on the "endpoint" field. +func EndpointEqualFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEqualFold(FieldEndpoint, v)) +} + +// EndpointContainsFold applies the ContainsFold predicate on the "endpoint" field. 
+func EndpointContainsFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContainsFold(FieldEndpoint, v)) +} + +// RegionEQ applies the EQ predicate on the "region" field. +func RegionEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldRegion, v)) +} + +// RegionNEQ applies the NEQ predicate on the "region" field. +func RegionNEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldRegion, v)) +} + +// RegionIn applies the In predicate on the "region" field. +func RegionIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldRegion, vs...)) +} + +// RegionNotIn applies the NotIn predicate on the "region" field. +func RegionNotIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldRegion, vs...)) +} + +// RegionGT applies the GT predicate on the "region" field. +func RegionGT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldRegion, v)) +} + +// RegionGTE applies the GTE predicate on the "region" field. +func RegionGTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldRegion, v)) +} + +// RegionLT applies the LT predicate on the "region" field. +func RegionLT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldRegion, v)) +} + +// RegionLTE applies the LTE predicate on the "region" field. +func RegionLTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldRegion, v)) +} + +// RegionContains applies the Contains predicate on the "region" field. +func RegionContains(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContains(FieldRegion, v)) +} + +// RegionHasPrefix applies the HasPrefix predicate on the "region" field. 
+func RegionHasPrefix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasPrefix(FieldRegion, v)) +} + +// RegionHasSuffix applies the HasSuffix predicate on the "region" field. +func RegionHasSuffix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasSuffix(FieldRegion, v)) +} + +// RegionEqualFold applies the EqualFold predicate on the "region" field. +func RegionEqualFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEqualFold(FieldRegion, v)) +} + +// RegionContainsFold applies the ContainsFold predicate on the "region" field. +func RegionContainsFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContainsFold(FieldRegion, v)) +} + +// BucketEQ applies the EQ predicate on the "bucket" field. +func BucketEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldBucket, v)) +} + +// BucketNEQ applies the NEQ predicate on the "bucket" field. +func BucketNEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldBucket, v)) +} + +// BucketIn applies the In predicate on the "bucket" field. +func BucketIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldBucket, vs...)) +} + +// BucketNotIn applies the NotIn predicate on the "bucket" field. +func BucketNotIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldBucket, vs...)) +} + +// BucketGT applies the GT predicate on the "bucket" field. +func BucketGT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldBucket, v)) +} + +// BucketGTE applies the GTE predicate on the "bucket" field. +func BucketGTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldBucket, v)) +} + +// BucketLT applies the LT predicate on the "bucket" field. 
+func BucketLT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldBucket, v)) +} + +// BucketLTE applies the LTE predicate on the "bucket" field. +func BucketLTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldBucket, v)) +} + +// BucketContains applies the Contains predicate on the "bucket" field. +func BucketContains(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContains(FieldBucket, v)) +} + +// BucketHasPrefix applies the HasPrefix predicate on the "bucket" field. +func BucketHasPrefix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasPrefix(FieldBucket, v)) +} + +// BucketHasSuffix applies the HasSuffix predicate on the "bucket" field. +func BucketHasSuffix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasSuffix(FieldBucket, v)) +} + +// BucketEqualFold applies the EqualFold predicate on the "bucket" field. +func BucketEqualFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEqualFold(FieldBucket, v)) +} + +// BucketContainsFold applies the ContainsFold predicate on the "bucket" field. +func BucketContainsFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContainsFold(FieldBucket, v)) +} + +// AccessKeyIDEQ applies the EQ predicate on the "access_key_id" field. +func AccessKeyIDEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldAccessKeyID, v)) +} + +// AccessKeyIDNEQ applies the NEQ predicate on the "access_key_id" field. +func AccessKeyIDNEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldAccessKeyID, v)) +} + +// AccessKeyIDIn applies the In predicate on the "access_key_id" field. 
+func AccessKeyIDIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldAccessKeyID, vs...)) +} + +// AccessKeyIDNotIn applies the NotIn predicate on the "access_key_id" field. +func AccessKeyIDNotIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldAccessKeyID, vs...)) +} + +// AccessKeyIDGT applies the GT predicate on the "access_key_id" field. +func AccessKeyIDGT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldAccessKeyID, v)) +} + +// AccessKeyIDGTE applies the GTE predicate on the "access_key_id" field. +func AccessKeyIDGTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldAccessKeyID, v)) +} + +// AccessKeyIDLT applies the LT predicate on the "access_key_id" field. +func AccessKeyIDLT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldAccessKeyID, v)) +} + +// AccessKeyIDLTE applies the LTE predicate on the "access_key_id" field. +func AccessKeyIDLTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldAccessKeyID, v)) +} + +// AccessKeyIDContains applies the Contains predicate on the "access_key_id" field. +func AccessKeyIDContains(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContains(FieldAccessKeyID, v)) +} + +// AccessKeyIDHasPrefix applies the HasPrefix predicate on the "access_key_id" field. +func AccessKeyIDHasPrefix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasPrefix(FieldAccessKeyID, v)) +} + +// AccessKeyIDHasSuffix applies the HasSuffix predicate on the "access_key_id" field. +func AccessKeyIDHasSuffix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasSuffix(FieldAccessKeyID, v)) +} + +// AccessKeyIDEqualFold applies the EqualFold predicate on the "access_key_id" field. 
+func AccessKeyIDEqualFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEqualFold(FieldAccessKeyID, v)) +} + +// AccessKeyIDContainsFold applies the ContainsFold predicate on the "access_key_id" field. +func AccessKeyIDContainsFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContainsFold(FieldAccessKeyID, v)) +} + +// SecretAccessKeyEncryptedEQ applies the EQ predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldSecretAccessKeyEncrypted, v)) +} + +// SecretAccessKeyEncryptedNEQ applies the NEQ predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedNEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldSecretAccessKeyEncrypted, v)) +} + +// SecretAccessKeyEncryptedIn applies the In predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldSecretAccessKeyEncrypted, vs...)) +} + +// SecretAccessKeyEncryptedNotIn applies the NotIn predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedNotIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldSecretAccessKeyEncrypted, vs...)) +} + +// SecretAccessKeyEncryptedGT applies the GT predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedGT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldSecretAccessKeyEncrypted, v)) +} + +// SecretAccessKeyEncryptedGTE applies the GTE predicate on the "secret_access_key_encrypted" field. 
+func SecretAccessKeyEncryptedGTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldSecretAccessKeyEncrypted, v)) +} + +// SecretAccessKeyEncryptedLT applies the LT predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedLT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldSecretAccessKeyEncrypted, v)) +} + +// SecretAccessKeyEncryptedLTE applies the LTE predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedLTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldSecretAccessKeyEncrypted, v)) +} + +// SecretAccessKeyEncryptedContains applies the Contains predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedContains(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContains(FieldSecretAccessKeyEncrypted, v)) +} + +// SecretAccessKeyEncryptedHasPrefix applies the HasPrefix predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedHasPrefix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasPrefix(FieldSecretAccessKeyEncrypted, v)) +} + +// SecretAccessKeyEncryptedHasSuffix applies the HasSuffix predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedHasSuffix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasSuffix(FieldSecretAccessKeyEncrypted, v)) +} + +// SecretAccessKeyEncryptedIsNil applies the IsNil predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedIsNil() predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIsNull(FieldSecretAccessKeyEncrypted)) +} + +// SecretAccessKeyEncryptedNotNil applies the NotNil predicate on the "secret_access_key_encrypted" field. 
+func SecretAccessKeyEncryptedNotNil() predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotNull(FieldSecretAccessKeyEncrypted)) +} + +// SecretAccessKeyEncryptedEqualFold applies the EqualFold predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedEqualFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEqualFold(FieldSecretAccessKeyEncrypted, v)) +} + +// SecretAccessKeyEncryptedContainsFold applies the ContainsFold predicate on the "secret_access_key_encrypted" field. +func SecretAccessKeyEncryptedContainsFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContainsFold(FieldSecretAccessKeyEncrypted, v)) +} + +// PrefixEQ applies the EQ predicate on the "prefix" field. +func PrefixEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldPrefix, v)) +} + +// PrefixNEQ applies the NEQ predicate on the "prefix" field. +func PrefixNEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldPrefix, v)) +} + +// PrefixIn applies the In predicate on the "prefix" field. +func PrefixIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldPrefix, vs...)) +} + +// PrefixNotIn applies the NotIn predicate on the "prefix" field. +func PrefixNotIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldPrefix, vs...)) +} + +// PrefixGT applies the GT predicate on the "prefix" field. +func PrefixGT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldPrefix, v)) +} + +// PrefixGTE applies the GTE predicate on the "prefix" field. +func PrefixGTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldPrefix, v)) +} + +// PrefixLT applies the LT predicate on the "prefix" field. 
+func PrefixLT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldPrefix, v)) +} + +// PrefixLTE applies the LTE predicate on the "prefix" field. +func PrefixLTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldPrefix, v)) +} + +// PrefixContains applies the Contains predicate on the "prefix" field. +func PrefixContains(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContains(FieldPrefix, v)) +} + +// PrefixHasPrefix applies the HasPrefix predicate on the "prefix" field. +func PrefixHasPrefix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasPrefix(FieldPrefix, v)) +} + +// PrefixHasSuffix applies the HasSuffix predicate on the "prefix" field. +func PrefixHasSuffix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasSuffix(FieldPrefix, v)) +} + +// PrefixEqualFold applies the EqualFold predicate on the "prefix" field. +func PrefixEqualFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEqualFold(FieldPrefix, v)) +} + +// PrefixContainsFold applies the ContainsFold predicate on the "prefix" field. +func PrefixContainsFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContainsFold(FieldPrefix, v)) +} + +// ForcePathStyleEQ applies the EQ predicate on the "force_path_style" field. +func ForcePathStyleEQ(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldForcePathStyle, v)) +} + +// ForcePathStyleNEQ applies the NEQ predicate on the "force_path_style" field. +func ForcePathStyleNEQ(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldForcePathStyle, v)) +} + +// UseSslEQ applies the EQ predicate on the "use_ssl" field. 
+func UseSslEQ(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldUseSsl, v)) +} + +// UseSslNEQ applies the NEQ predicate on the "use_ssl" field. +func UseSslNEQ(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldUseSsl, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. 
+func UpdatedAtEQ(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.BackupS3Config) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. 
+func Or(predicates ...predicate.BackupS3Config) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.BackupS3Config) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.NotPredicates(p)) +} diff --git a/backup/ent/backups3config_create.go b/backup/ent/backups3config_create.go new file mode 100644 index 000000000..268593efe --- /dev/null +++ b/backup/ent/backups3config_create.go @@ -0,0 +1,445 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" +) + +// BackupS3ConfigCreate is the builder for creating a BackupS3Config entity. +type BackupS3ConfigCreate struct { + config + mutation *BackupS3ConfigMutation + hooks []Hook +} + +// SetEnabled sets the "enabled" field. +func (_c *BackupS3ConfigCreate) SetEnabled(v bool) *BackupS3ConfigCreate { + _c.mutation.SetEnabled(v) + return _c +} + +// SetNillableEnabled sets the "enabled" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableEnabled(v *bool) *BackupS3ConfigCreate { + if v != nil { + _c.SetEnabled(*v) + } + return _c +} + +// SetEndpoint sets the "endpoint" field. +func (_c *BackupS3ConfigCreate) SetEndpoint(v string) *BackupS3ConfigCreate { + _c.mutation.SetEndpoint(v) + return _c +} + +// SetNillableEndpoint sets the "endpoint" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableEndpoint(v *string) *BackupS3ConfigCreate { + if v != nil { + _c.SetEndpoint(*v) + } + return _c +} + +// SetRegion sets the "region" field. +func (_c *BackupS3ConfigCreate) SetRegion(v string) *BackupS3ConfigCreate { + _c.mutation.SetRegion(v) + return _c +} + +// SetNillableRegion sets the "region" field if the given value is not nil. 
+func (_c *BackupS3ConfigCreate) SetNillableRegion(v *string) *BackupS3ConfigCreate { + if v != nil { + _c.SetRegion(*v) + } + return _c +} + +// SetBucket sets the "bucket" field. +func (_c *BackupS3ConfigCreate) SetBucket(v string) *BackupS3ConfigCreate { + _c.mutation.SetBucket(v) + return _c +} + +// SetNillableBucket sets the "bucket" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableBucket(v *string) *BackupS3ConfigCreate { + if v != nil { + _c.SetBucket(*v) + } + return _c +} + +// SetAccessKeyID sets the "access_key_id" field. +func (_c *BackupS3ConfigCreate) SetAccessKeyID(v string) *BackupS3ConfigCreate { + _c.mutation.SetAccessKeyID(v) + return _c +} + +// SetNillableAccessKeyID sets the "access_key_id" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableAccessKeyID(v *string) *BackupS3ConfigCreate { + if v != nil { + _c.SetAccessKeyID(*v) + } + return _c +} + +// SetSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field. +func (_c *BackupS3ConfigCreate) SetSecretAccessKeyEncrypted(v string) *BackupS3ConfigCreate { + _c.mutation.SetSecretAccessKeyEncrypted(v) + return _c +} + +// SetNillableSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableSecretAccessKeyEncrypted(v *string) *BackupS3ConfigCreate { + if v != nil { + _c.SetSecretAccessKeyEncrypted(*v) + } + return _c +} + +// SetPrefix sets the "prefix" field. +func (_c *BackupS3ConfigCreate) SetPrefix(v string) *BackupS3ConfigCreate { + _c.mutation.SetPrefix(v) + return _c +} + +// SetNillablePrefix sets the "prefix" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillablePrefix(v *string) *BackupS3ConfigCreate { + if v != nil { + _c.SetPrefix(*v) + } + return _c +} + +// SetForcePathStyle sets the "force_path_style" field. 
+func (_c *BackupS3ConfigCreate) SetForcePathStyle(v bool) *BackupS3ConfigCreate { + _c.mutation.SetForcePathStyle(v) + return _c +} + +// SetNillableForcePathStyle sets the "force_path_style" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableForcePathStyle(v *bool) *BackupS3ConfigCreate { + if v != nil { + _c.SetForcePathStyle(*v) + } + return _c +} + +// SetUseSsl sets the "use_ssl" field. +func (_c *BackupS3ConfigCreate) SetUseSsl(v bool) *BackupS3ConfigCreate { + _c.mutation.SetUseSsl(v) + return _c +} + +// SetNillableUseSsl sets the "use_ssl" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableUseSsl(v *bool) *BackupS3ConfigCreate { + if v != nil { + _c.SetUseSsl(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *BackupS3ConfigCreate) SetCreatedAt(v time.Time) *BackupS3ConfigCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableCreatedAt(v *time.Time) *BackupS3ConfigCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *BackupS3ConfigCreate) SetUpdatedAt(v time.Time) *BackupS3ConfigCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableUpdatedAt(v *time.Time) *BackupS3ConfigCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// Mutation returns the BackupS3ConfigMutation object of the builder. +func (_c *BackupS3ConfigCreate) Mutation() *BackupS3ConfigMutation { + return _c.mutation +} + +// Save creates the BackupS3Config in the database. 
+func (_c *BackupS3ConfigCreate) Save(ctx context.Context) (*BackupS3Config, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *BackupS3ConfigCreate) SaveX(ctx context.Context) *BackupS3Config { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *BackupS3ConfigCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *BackupS3ConfigCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *BackupS3ConfigCreate) defaults() { + if _, ok := _c.mutation.Enabled(); !ok { + v := backups3config.DefaultEnabled + _c.mutation.SetEnabled(v) + } + if _, ok := _c.mutation.Endpoint(); !ok { + v := backups3config.DefaultEndpoint + _c.mutation.SetEndpoint(v) + } + if _, ok := _c.mutation.Region(); !ok { + v := backups3config.DefaultRegion + _c.mutation.SetRegion(v) + } + if _, ok := _c.mutation.Bucket(); !ok { + v := backups3config.DefaultBucket + _c.mutation.SetBucket(v) + } + if _, ok := _c.mutation.AccessKeyID(); !ok { + v := backups3config.DefaultAccessKeyID + _c.mutation.SetAccessKeyID(v) + } + if _, ok := _c.mutation.Prefix(); !ok { + v := backups3config.DefaultPrefix + _c.mutation.SetPrefix(v) + } + if _, ok := _c.mutation.ForcePathStyle(); !ok { + v := backups3config.DefaultForcePathStyle + _c.mutation.SetForcePathStyle(v) + } + if _, ok := _c.mutation.UseSsl(); !ok { + v := backups3config.DefaultUseSsl + _c.mutation.SetUseSsl(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := backups3config.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := backups3config.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks 
and user-defined validators on the builder. +func (_c *BackupS3ConfigCreate) check() error { + if _, ok := _c.mutation.Enabled(); !ok { + return &ValidationError{Name: "enabled", err: errors.New(`ent: missing required field "BackupS3Config.enabled"`)} + } + if _, ok := _c.mutation.Endpoint(); !ok { + return &ValidationError{Name: "endpoint", err: errors.New(`ent: missing required field "BackupS3Config.endpoint"`)} + } + if _, ok := _c.mutation.Region(); !ok { + return &ValidationError{Name: "region", err: errors.New(`ent: missing required field "BackupS3Config.region"`)} + } + if _, ok := _c.mutation.Bucket(); !ok { + return &ValidationError{Name: "bucket", err: errors.New(`ent: missing required field "BackupS3Config.bucket"`)} + } + if _, ok := _c.mutation.AccessKeyID(); !ok { + return &ValidationError{Name: "access_key_id", err: errors.New(`ent: missing required field "BackupS3Config.access_key_id"`)} + } + if _, ok := _c.mutation.Prefix(); !ok { + return &ValidationError{Name: "prefix", err: errors.New(`ent: missing required field "BackupS3Config.prefix"`)} + } + if _, ok := _c.mutation.ForcePathStyle(); !ok { + return &ValidationError{Name: "force_path_style", err: errors.New(`ent: missing required field "BackupS3Config.force_path_style"`)} + } + if _, ok := _c.mutation.UseSsl(); !ok { + return &ValidationError{Name: "use_ssl", err: errors.New(`ent: missing required field "BackupS3Config.use_ssl"`)} + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "BackupS3Config.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "BackupS3Config.updated_at"`)} + } + return nil +} + +func (_c *BackupS3ConfigCreate) sqlSave(ctx context.Context) (*BackupS3Config, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := 
sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *BackupS3ConfigCreate) createSpec() (*BackupS3Config, *sqlgraph.CreateSpec) { + var ( + _node = &BackupS3Config{config: _c.config} + _spec = sqlgraph.NewCreateSpec(backups3config.Table, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) + ) + if value, ok := _c.mutation.Enabled(); ok { + _spec.SetField(backups3config.FieldEnabled, field.TypeBool, value) + _node.Enabled = value + } + if value, ok := _c.mutation.Endpoint(); ok { + _spec.SetField(backups3config.FieldEndpoint, field.TypeString, value) + _node.Endpoint = value + } + if value, ok := _c.mutation.Region(); ok { + _spec.SetField(backups3config.FieldRegion, field.TypeString, value) + _node.Region = value + } + if value, ok := _c.mutation.Bucket(); ok { + _spec.SetField(backups3config.FieldBucket, field.TypeString, value) + _node.Bucket = value + } + if value, ok := _c.mutation.AccessKeyID(); ok { + _spec.SetField(backups3config.FieldAccessKeyID, field.TypeString, value) + _node.AccessKeyID = value + } + if value, ok := _c.mutation.SecretAccessKeyEncrypted(); ok { + _spec.SetField(backups3config.FieldSecretAccessKeyEncrypted, field.TypeString, value) + _node.SecretAccessKeyEncrypted = value + } + if value, ok := _c.mutation.Prefix(); ok { + _spec.SetField(backups3config.FieldPrefix, field.TypeString, value) + _node.Prefix = value + } + if value, ok := _c.mutation.ForcePathStyle(); ok { + _spec.SetField(backups3config.FieldForcePathStyle, field.TypeBool, value) + _node.ForcePathStyle = value + } + if value, ok := _c.mutation.UseSsl(); ok { + _spec.SetField(backups3config.FieldUseSsl, field.TypeBool, value) + _node.UseSsl = value + } + if value, ok := _c.mutation.CreatedAt(); ok { + 
_spec.SetField(backups3config.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(backups3config.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + return _node, _spec +} + +// BackupS3ConfigCreateBulk is the builder for creating many BackupS3Config entities in bulk. +type BackupS3ConfigCreateBulk struct { + config + err error + builders []*BackupS3ConfigCreate +} + +// Save creates the BackupS3Config entities in the database. +func (_c *BackupS3ConfigCreateBulk) Save(ctx context.Context) ([]*BackupS3Config, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*BackupS3Config, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BackupS3ConfigMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *BackupS3ConfigCreateBulk) SaveX(ctx context.Context) []*BackupS3Config { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *BackupS3ConfigCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *BackupS3ConfigCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backup/ent/backups3config_delete.go b/backup/ent/backups3config_delete.go new file mode 100644 index 000000000..0cfbbc196 --- /dev/null +++ b/backup/ent/backups3config_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupS3ConfigDelete is the builder for deleting a BackupS3Config entity. +type BackupS3ConfigDelete struct { + config + hooks []Hook + mutation *BackupS3ConfigMutation +} + +// Where appends a list predicates to the BackupS3ConfigDelete builder. 
+func (_d *BackupS3ConfigDelete) Where(ps ...predicate.BackupS3Config) *BackupS3ConfigDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *BackupS3ConfigDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *BackupS3ConfigDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *BackupS3ConfigDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(backups3config.Table, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// BackupS3ConfigDeleteOne is the builder for deleting a single BackupS3Config entity. +type BackupS3ConfigDeleteOne struct { + _d *BackupS3ConfigDelete +} + +// Where appends a list predicates to the BackupS3ConfigDelete builder. +func (_d *BackupS3ConfigDeleteOne) Where(ps ...predicate.BackupS3Config) *BackupS3ConfigDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *BackupS3ConfigDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{backups3config.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *BackupS3ConfigDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backup/ent/backups3config_query.go b/backup/ent/backups3config_query.go new file mode 100644 index 000000000..11bdf7f01 --- /dev/null +++ b/backup/ent/backups3config_query.go @@ -0,0 +1,527 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupS3ConfigQuery is the builder for querying BackupS3Config entities. +type BackupS3ConfigQuery struct { + config + ctx *QueryContext + order []backups3config.OrderOption + inters []Interceptor + predicates []predicate.BackupS3Config + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the BackupS3ConfigQuery builder. +func (_q *BackupS3ConfigQuery) Where(ps ...predicate.BackupS3Config) *BackupS3ConfigQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *BackupS3ConfigQuery) Limit(limit int) *BackupS3ConfigQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *BackupS3ConfigQuery) Offset(offset int) *BackupS3ConfigQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *BackupS3ConfigQuery) Unique(unique bool) *BackupS3ConfigQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. 
+func (_q *BackupS3ConfigQuery) Order(o ...backups3config.OrderOption) *BackupS3ConfigQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first BackupS3Config entity from the query. +// Returns a *NotFoundError when no BackupS3Config was found. +func (_q *BackupS3ConfigQuery) First(ctx context.Context) (*BackupS3Config, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{backups3config.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *BackupS3ConfigQuery) FirstX(ctx context.Context) *BackupS3Config { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first BackupS3Config ID from the query. +// Returns a *NotFoundError when no BackupS3Config ID was found. +func (_q *BackupS3ConfigQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{backups3config.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *BackupS3ConfigQuery) FirstIDX(ctx context.Context) int { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single BackupS3Config entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one BackupS3Config entity is found. +// Returns a *NotFoundError when no BackupS3Config entities are found. 
+func (_q *BackupS3ConfigQuery) Only(ctx context.Context) (*BackupS3Config, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{backups3config.Label} + default: + return nil, &NotSingularError{backups3config.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (_q *BackupS3ConfigQuery) OnlyX(ctx context.Context) *BackupS3Config { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only BackupS3Config ID in the query. +// Returns a *NotSingularError when more than one BackupS3Config ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *BackupS3ConfigQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{backups3config.Label} + default: + err = &NotSingularError{backups3config.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *BackupS3ConfigQuery) OnlyIDX(ctx context.Context) int { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of BackupS3Configs. +func (_q *BackupS3ConfigQuery) All(ctx context.Context) ([]*BackupS3Config, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*BackupS3Config, *BackupS3ConfigQuery]() + return withInterceptors[[]*BackupS3Config](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. 
+func (_q *BackupS3ConfigQuery) AllX(ctx context.Context) []*BackupS3Config { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of BackupS3Config IDs. +func (_q *BackupS3ConfigQuery) IDs(ctx context.Context) (ids []int, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(backups3config.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *BackupS3ConfigQuery) IDsX(ctx context.Context) []int { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *BackupS3ConfigQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*BackupS3ConfigQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *BackupS3ConfigQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *BackupS3ConfigQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *BackupS3ConfigQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the BackupS3ConfigQuery builder, including all associated steps. 
It can be +// used to prepare common query builders and use them differently after the clone is made. +func (_q *BackupS3ConfigQuery) Clone() *BackupS3ConfigQuery { + if _q == nil { + return nil + } + return &BackupS3ConfigQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]backups3config.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.BackupS3Config{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// Enabled bool `json:"enabled,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.BackupS3Config.Query(). +// GroupBy(backups3config.FieldEnabled). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *BackupS3ConfigQuery) GroupBy(field string, fields ...string) *BackupS3ConfigGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &BackupS3ConfigGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = backups3config.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// Enabled bool `json:"enabled,omitempty"` +// } +// +// client.BackupS3Config.Query(). +// Select(backups3config.FieldEnabled). +// Scan(ctx, &v) +func (_q *BackupS3ConfigQuery) Select(fields ...string) *BackupS3ConfigSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &BackupS3ConfigSelect{BackupS3ConfigQuery: _q} + sbuild.label = backups3config.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a BackupS3ConfigSelect configured with the given aggregations. 
+func (_q *BackupS3ConfigQuery) Aggregate(fns ...AggregateFunc) *BackupS3ConfigSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *BackupS3ConfigQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !backups3config.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *BackupS3ConfigQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BackupS3Config, error) { + var ( + nodes = []*BackupS3Config{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*BackupS3Config).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &BackupS3Config{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *BackupS3ConfigQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *BackupS3ConfigQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(backups3config.Table, backups3config.Columns, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; 
unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, backups3config.FieldID) + for i := range fields { + if fields[i] != backups3config.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *BackupS3ConfigQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(backups3config.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = backups3config.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// BackupS3ConfigGroupBy is the group-by builder for BackupS3Config entities. +type BackupS3ConfigGroupBy struct { + selector + build *BackupS3ConfigQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. 
+func (_g *BackupS3ConfigGroupBy) Aggregate(fns ...AggregateFunc) *BackupS3ConfigGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *BackupS3ConfigGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*BackupS3ConfigQuery, *BackupS3ConfigGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *BackupS3ConfigGroupBy) sqlScan(ctx context.Context, root *BackupS3ConfigQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// BackupS3ConfigSelect is the builder for selecting fields of BackupS3Config entities. +type BackupS3ConfigSelect struct { + *BackupS3ConfigQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *BackupS3ConfigSelect) Aggregate(fns ...AggregateFunc) *BackupS3ConfigSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *BackupS3ConfigSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*BackupS3ConfigQuery, *BackupS3ConfigSelect](ctx, _s.BackupS3ConfigQuery, _s, _s.inters, v) +} + +func (_s *BackupS3ConfigSelect) sqlScan(ctx context.Context, root *BackupS3ConfigQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backup/ent/backups3config_update.go b/backup/ent/backups3config_update.go new file mode 100644 index 000000000..14354b9c9 --- /dev/null +++ b/backup/ent/backups3config_update.go @@ -0,0 +1,536 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupS3ConfigUpdate is the builder for updating BackupS3Config entities. +type BackupS3ConfigUpdate struct { + config + hooks []Hook + mutation *BackupS3ConfigMutation +} + +// Where appends a list predicates to the BackupS3ConfigUpdate builder. +func (_u *BackupS3ConfigUpdate) Where(ps ...predicate.BackupS3Config) *BackupS3ConfigUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetEnabled sets the "enabled" field. 
+func (_u *BackupS3ConfigUpdate) SetEnabled(v bool) *BackupS3ConfigUpdate { + _u.mutation.SetEnabled(v) + return _u +} + +// SetNillableEnabled sets the "enabled" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableEnabled(v *bool) *BackupS3ConfigUpdate { + if v != nil { + _u.SetEnabled(*v) + } + return _u +} + +// SetEndpoint sets the "endpoint" field. +func (_u *BackupS3ConfigUpdate) SetEndpoint(v string) *BackupS3ConfigUpdate { + _u.mutation.SetEndpoint(v) + return _u +} + +// SetNillableEndpoint sets the "endpoint" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableEndpoint(v *string) *BackupS3ConfigUpdate { + if v != nil { + _u.SetEndpoint(*v) + } + return _u +} + +// SetRegion sets the "region" field. +func (_u *BackupS3ConfigUpdate) SetRegion(v string) *BackupS3ConfigUpdate { + _u.mutation.SetRegion(v) + return _u +} + +// SetNillableRegion sets the "region" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableRegion(v *string) *BackupS3ConfigUpdate { + if v != nil { + _u.SetRegion(*v) + } + return _u +} + +// SetBucket sets the "bucket" field. +func (_u *BackupS3ConfigUpdate) SetBucket(v string) *BackupS3ConfigUpdate { + _u.mutation.SetBucket(v) + return _u +} + +// SetNillableBucket sets the "bucket" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableBucket(v *string) *BackupS3ConfigUpdate { + if v != nil { + _u.SetBucket(*v) + } + return _u +} + +// SetAccessKeyID sets the "access_key_id" field. +func (_u *BackupS3ConfigUpdate) SetAccessKeyID(v string) *BackupS3ConfigUpdate { + _u.mutation.SetAccessKeyID(v) + return _u +} + +// SetNillableAccessKeyID sets the "access_key_id" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableAccessKeyID(v *string) *BackupS3ConfigUpdate { + if v != nil { + _u.SetAccessKeyID(*v) + } + return _u +} + +// SetSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field. 
+func (_u *BackupS3ConfigUpdate) SetSecretAccessKeyEncrypted(v string) *BackupS3ConfigUpdate { + _u.mutation.SetSecretAccessKeyEncrypted(v) + return _u +} + +// SetNillableSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableSecretAccessKeyEncrypted(v *string) *BackupS3ConfigUpdate { + if v != nil { + _u.SetSecretAccessKeyEncrypted(*v) + } + return _u +} + +// ClearSecretAccessKeyEncrypted clears the value of the "secret_access_key_encrypted" field. +func (_u *BackupS3ConfigUpdate) ClearSecretAccessKeyEncrypted() *BackupS3ConfigUpdate { + _u.mutation.ClearSecretAccessKeyEncrypted() + return _u +} + +// SetPrefix sets the "prefix" field. +func (_u *BackupS3ConfigUpdate) SetPrefix(v string) *BackupS3ConfigUpdate { + _u.mutation.SetPrefix(v) + return _u +} + +// SetNillablePrefix sets the "prefix" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillablePrefix(v *string) *BackupS3ConfigUpdate { + if v != nil { + _u.SetPrefix(*v) + } + return _u +} + +// SetForcePathStyle sets the "force_path_style" field. +func (_u *BackupS3ConfigUpdate) SetForcePathStyle(v bool) *BackupS3ConfigUpdate { + _u.mutation.SetForcePathStyle(v) + return _u +} + +// SetNillableForcePathStyle sets the "force_path_style" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableForcePathStyle(v *bool) *BackupS3ConfigUpdate { + if v != nil { + _u.SetForcePathStyle(*v) + } + return _u +} + +// SetUseSsl sets the "use_ssl" field. +func (_u *BackupS3ConfigUpdate) SetUseSsl(v bool) *BackupS3ConfigUpdate { + _u.mutation.SetUseSsl(v) + return _u +} + +// SetNillableUseSsl sets the "use_ssl" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableUseSsl(v *bool) *BackupS3ConfigUpdate { + if v != nil { + _u.SetUseSsl(*v) + } + return _u +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (_u *BackupS3ConfigUpdate) SetUpdatedAt(v time.Time) *BackupS3ConfigUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// Mutation returns the BackupS3ConfigMutation object of the builder. +func (_u *BackupS3ConfigUpdate) Mutation() *BackupS3ConfigMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *BackupS3ConfigUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *BackupS3ConfigUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *BackupS3ConfigUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *BackupS3ConfigUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. 
+func (_u *BackupS3ConfigUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := backups3config.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +func (_u *BackupS3ConfigUpdate) sqlSave(ctx context.Context) (_node int, err error) { + _spec := sqlgraph.NewUpdateSpec(backups3config.Table, backups3config.Columns, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Enabled(); ok { + _spec.SetField(backups3config.FieldEnabled, field.TypeBool, value) + } + if value, ok := _u.mutation.Endpoint(); ok { + _spec.SetField(backups3config.FieldEndpoint, field.TypeString, value) + } + if value, ok := _u.mutation.Region(); ok { + _spec.SetField(backups3config.FieldRegion, field.TypeString, value) + } + if value, ok := _u.mutation.Bucket(); ok { + _spec.SetField(backups3config.FieldBucket, field.TypeString, value) + } + if value, ok := _u.mutation.AccessKeyID(); ok { + _spec.SetField(backups3config.FieldAccessKeyID, field.TypeString, value) + } + if value, ok := _u.mutation.SecretAccessKeyEncrypted(); ok { + _spec.SetField(backups3config.FieldSecretAccessKeyEncrypted, field.TypeString, value) + } + if _u.mutation.SecretAccessKeyEncryptedCleared() { + _spec.ClearField(backups3config.FieldSecretAccessKeyEncrypted, field.TypeString) + } + if value, ok := _u.mutation.Prefix(); ok { + _spec.SetField(backups3config.FieldPrefix, field.TypeString, value) + } + if value, ok := _u.mutation.ForcePathStyle(); ok { + _spec.SetField(backups3config.FieldForcePathStyle, field.TypeBool, value) + } + if value, ok := _u.mutation.UseSsl(); ok { + _spec.SetField(backups3config.FieldUseSsl, field.TypeBool, value) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(backups3config.FieldUpdatedAt, field.TypeTime, value) + } + if _node, err = sqlgraph.UpdateNodes(ctx, 
_u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{backups3config.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// BackupS3ConfigUpdateOne is the builder for updating a single BackupS3Config entity. +type BackupS3ConfigUpdateOne struct { + config + fields []string + hooks []Hook + mutation *BackupS3ConfigMutation +} + +// SetEnabled sets the "enabled" field. +func (_u *BackupS3ConfigUpdateOne) SetEnabled(v bool) *BackupS3ConfigUpdateOne { + _u.mutation.SetEnabled(v) + return _u +} + +// SetNillableEnabled sets the "enabled" field if the given value is not nil. +func (_u *BackupS3ConfigUpdateOne) SetNillableEnabled(v *bool) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetEnabled(*v) + } + return _u +} + +// SetEndpoint sets the "endpoint" field. +func (_u *BackupS3ConfigUpdateOne) SetEndpoint(v string) *BackupS3ConfigUpdateOne { + _u.mutation.SetEndpoint(v) + return _u +} + +// SetNillableEndpoint sets the "endpoint" field if the given value is not nil. +func (_u *BackupS3ConfigUpdateOne) SetNillableEndpoint(v *string) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetEndpoint(*v) + } + return _u +} + +// SetRegion sets the "region" field. +func (_u *BackupS3ConfigUpdateOne) SetRegion(v string) *BackupS3ConfigUpdateOne { + _u.mutation.SetRegion(v) + return _u +} + +// SetNillableRegion sets the "region" field if the given value is not nil. +func (_u *BackupS3ConfigUpdateOne) SetNillableRegion(v *string) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetRegion(*v) + } + return _u +} + +// SetBucket sets the "bucket" field. +func (_u *BackupS3ConfigUpdateOne) SetBucket(v string) *BackupS3ConfigUpdateOne { + _u.mutation.SetBucket(v) + return _u +} + +// SetNillableBucket sets the "bucket" field if the given value is not nil. 
+func (_u *BackupS3ConfigUpdateOne) SetNillableBucket(v *string) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetBucket(*v) + } + return _u +} + +// SetAccessKeyID sets the "access_key_id" field. +func (_u *BackupS3ConfigUpdateOne) SetAccessKeyID(v string) *BackupS3ConfigUpdateOne { + _u.mutation.SetAccessKeyID(v) + return _u +} + +// SetNillableAccessKeyID sets the "access_key_id" field if the given value is not nil. +func (_u *BackupS3ConfigUpdateOne) SetNillableAccessKeyID(v *string) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetAccessKeyID(*v) + } + return _u +} + +// SetSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field. +func (_u *BackupS3ConfigUpdateOne) SetSecretAccessKeyEncrypted(v string) *BackupS3ConfigUpdateOne { + _u.mutation.SetSecretAccessKeyEncrypted(v) + return _u +} + +// SetNillableSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field if the given value is not nil. +func (_u *BackupS3ConfigUpdateOne) SetNillableSecretAccessKeyEncrypted(v *string) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetSecretAccessKeyEncrypted(*v) + } + return _u +} + +// ClearSecretAccessKeyEncrypted clears the value of the "secret_access_key_encrypted" field. +func (_u *BackupS3ConfigUpdateOne) ClearSecretAccessKeyEncrypted() *BackupS3ConfigUpdateOne { + _u.mutation.ClearSecretAccessKeyEncrypted() + return _u +} + +// SetPrefix sets the "prefix" field. +func (_u *BackupS3ConfigUpdateOne) SetPrefix(v string) *BackupS3ConfigUpdateOne { + _u.mutation.SetPrefix(v) + return _u +} + +// SetNillablePrefix sets the "prefix" field if the given value is not nil. +func (_u *BackupS3ConfigUpdateOne) SetNillablePrefix(v *string) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetPrefix(*v) + } + return _u +} + +// SetForcePathStyle sets the "force_path_style" field. 
+func (_u *BackupS3ConfigUpdateOne) SetForcePathStyle(v bool) *BackupS3ConfigUpdateOne { + _u.mutation.SetForcePathStyle(v) + return _u +} + +// SetNillableForcePathStyle sets the "force_path_style" field if the given value is not nil. +func (_u *BackupS3ConfigUpdateOne) SetNillableForcePathStyle(v *bool) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetForcePathStyle(*v) + } + return _u +} + +// SetUseSsl sets the "use_ssl" field. +func (_u *BackupS3ConfigUpdateOne) SetUseSsl(v bool) *BackupS3ConfigUpdateOne { + _u.mutation.SetUseSsl(v) + return _u +} + +// SetNillableUseSsl sets the "use_ssl" field if the given value is not nil. +func (_u *BackupS3ConfigUpdateOne) SetNillableUseSsl(v *bool) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetUseSsl(*v) + } + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *BackupS3ConfigUpdateOne) SetUpdatedAt(v time.Time) *BackupS3ConfigUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// Mutation returns the BackupS3ConfigMutation object of the builder. +func (_u *BackupS3ConfigUpdateOne) Mutation() *BackupS3ConfigMutation { + return _u.mutation +} + +// Where appends a list predicates to the BackupS3ConfigUpdate builder. +func (_u *BackupS3ConfigUpdateOne) Where(ps ...predicate.BackupS3Config) *BackupS3ConfigUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *BackupS3ConfigUpdateOne) Select(field string, fields ...string) *BackupS3ConfigUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated BackupS3Config entity. +func (_u *BackupS3ConfigUpdateOne) Save(ctx context.Context) (*BackupS3Config, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (_u *BackupS3ConfigUpdateOne) SaveX(ctx context.Context) *BackupS3Config { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *BackupS3ConfigUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *BackupS3ConfigUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *BackupS3ConfigUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := backups3config.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +func (_u *BackupS3ConfigUpdateOne) sqlSave(ctx context.Context) (_node *BackupS3Config, err error) { + _spec := sqlgraph.NewUpdateSpec(backups3config.Table, backups3config.Columns, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BackupS3Config.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, backups3config.FieldID) + for _, f := range fields { + if !backups3config.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != backups3config.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.Enabled(); ok { + _spec.SetField(backups3config.FieldEnabled, field.TypeBool, value) + } + if value, ok := _u.mutation.Endpoint(); ok { + _spec.SetField(backups3config.FieldEndpoint, field.TypeString, value) + } 
+ if value, ok := _u.mutation.Region(); ok { + _spec.SetField(backups3config.FieldRegion, field.TypeString, value) + } + if value, ok := _u.mutation.Bucket(); ok { + _spec.SetField(backups3config.FieldBucket, field.TypeString, value) + } + if value, ok := _u.mutation.AccessKeyID(); ok { + _spec.SetField(backups3config.FieldAccessKeyID, field.TypeString, value) + } + if value, ok := _u.mutation.SecretAccessKeyEncrypted(); ok { + _spec.SetField(backups3config.FieldSecretAccessKeyEncrypted, field.TypeString, value) + } + if _u.mutation.SecretAccessKeyEncryptedCleared() { + _spec.ClearField(backups3config.FieldSecretAccessKeyEncrypted, field.TypeString) + } + if value, ok := _u.mutation.Prefix(); ok { + _spec.SetField(backups3config.FieldPrefix, field.TypeString, value) + } + if value, ok := _u.mutation.ForcePathStyle(); ok { + _spec.SetField(backups3config.FieldForcePathStyle, field.TypeBool, value) + } + if value, ok := _u.mutation.UseSsl(); ok { + _spec.SetField(backups3config.FieldUseSsl, field.TypeBool, value) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(backups3config.FieldUpdatedAt, field.TypeTime, value) + } + _node = &BackupS3Config{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{backups3config.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backup/ent/backupsetting.go b/backup/ent/backupsetting.go new file mode 100644 index 000000000..8853d6529 --- /dev/null +++ b/backup/ent/backupsetting.go @@ -0,0 +1,172 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" +) + +// BackupSetting is the model entity for the BackupSetting schema. +type BackupSetting struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // SourceMode holds the value of the "source_mode" field. + SourceMode backupsetting.SourceMode `json:"source_mode,omitempty"` + // BackupRoot holds the value of the "backup_root" field. + BackupRoot string `json:"backup_root,omitempty"` + // RetentionDays holds the value of the "retention_days" field. + RetentionDays int `json:"retention_days,omitempty"` + // KeepLast holds the value of the "keep_last" field. + KeepLast int `json:"keep_last,omitempty"` + // SqlitePath holds the value of the "sqlite_path" field. + SqlitePath string `json:"sqlite_path,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*BackupSetting) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case backupsetting.FieldID, backupsetting.FieldRetentionDays, backupsetting.FieldKeepLast: + values[i] = new(sql.NullInt64) + case backupsetting.FieldSourceMode, backupsetting.FieldBackupRoot, backupsetting.FieldSqlitePath: + values[i] = new(sql.NullString) + case backupsetting.FieldCreatedAt, backupsetting.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the BackupSetting fields. 
+func (_m *BackupSetting) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case backupsetting.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int(value.Int64) + case backupsetting.FieldSourceMode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field source_mode", values[i]) + } else if value.Valid { + _m.SourceMode = backupsetting.SourceMode(value.String) + } + case backupsetting.FieldBackupRoot: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field backup_root", values[i]) + } else if value.Valid { + _m.BackupRoot = value.String + } + case backupsetting.FieldRetentionDays: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field retention_days", values[i]) + } else if value.Valid { + _m.RetentionDays = int(value.Int64) + } + case backupsetting.FieldKeepLast: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field keep_last", values[i]) + } else if value.Valid { + _m.KeepLast = int(value.Int64) + } + case backupsetting.FieldSqlitePath: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field sqlite_path", values[i]) + } else if value.Valid { + _m.SqlitePath = value.String + } + case backupsetting.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case backupsetting.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if 
value.Valid { + _m.UpdatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the BackupSetting. +// This includes values selected through modifiers, order, etc. +func (_m *BackupSetting) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this BackupSetting. +// Note that you need to call BackupSetting.Unwrap() before calling this method if this BackupSetting +// was returned from a transaction, and the transaction was committed or rolled back. +func (_m *BackupSetting) Update() *BackupSettingUpdateOne { + return NewBackupSettingClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the BackupSetting entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *BackupSetting) Unwrap() *BackupSetting { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: BackupSetting is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. 
+func (_m *BackupSetting) String() string { + var builder strings.Builder + builder.WriteString("BackupSetting(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("source_mode=") + builder.WriteString(fmt.Sprintf("%v", _m.SourceMode)) + builder.WriteString(", ") + builder.WriteString("backup_root=") + builder.WriteString(_m.BackupRoot) + builder.WriteString(", ") + builder.WriteString("retention_days=") + builder.WriteString(fmt.Sprintf("%v", _m.RetentionDays)) + builder.WriteString(", ") + builder.WriteString("keep_last=") + builder.WriteString(fmt.Sprintf("%v", _m.KeepLast)) + builder.WriteString(", ") + builder.WriteString("sqlite_path=") + builder.WriteString(_m.SqlitePath) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// BackupSettings is a parsable slice of BackupSetting. +type BackupSettings []*BackupSetting diff --git a/backup/ent/backupsetting/backupsetting.go b/backup/ent/backupsetting/backupsetting.go new file mode 100644 index 000000000..5dc076825 --- /dev/null +++ b/backup/ent/backupsetting/backupsetting.go @@ -0,0 +1,141 @@ +// Code generated by ent, DO NOT EDIT. + +package backupsetting + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the backupsetting type in the database. + Label = "backup_setting" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldSourceMode holds the string denoting the source_mode field in the database. + FieldSourceMode = "source_mode" + // FieldBackupRoot holds the string denoting the backup_root field in the database. + FieldBackupRoot = "backup_root" + // FieldRetentionDays holds the string denoting the retention_days field in the database. 
+ FieldRetentionDays = "retention_days" + // FieldKeepLast holds the string denoting the keep_last field in the database. + FieldKeepLast = "keep_last" + // FieldSqlitePath holds the string denoting the sqlite_path field in the database. + FieldSqlitePath = "sqlite_path" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // Table holds the table name of the backupsetting in the database. + Table = "backup_settings" +) + +// Columns holds all SQL columns for backupsetting fields. +var Columns = []string{ + FieldID, + FieldSourceMode, + FieldBackupRoot, + FieldRetentionDays, + FieldKeepLast, + FieldSqlitePath, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultBackupRoot holds the default value on creation for the "backup_root" field. + DefaultBackupRoot string + // DefaultRetentionDays holds the default value on creation for the "retention_days" field. + DefaultRetentionDays int + // DefaultKeepLast holds the default value on creation for the "keep_last" field. + DefaultKeepLast int + // DefaultSqlitePath holds the default value on creation for the "sqlite_path" field. + DefaultSqlitePath string + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// SourceMode defines the type for the "source_mode" enum field. 
+type SourceMode string + +// SourceModeDirect is the default value of the SourceMode enum. +const DefaultSourceMode = SourceModeDirect + +// SourceMode values. +const ( + SourceModeDirect SourceMode = "direct" + SourceModeDockerExec SourceMode = "docker_exec" +) + +func (sm SourceMode) String() string { + return string(sm) +} + +// SourceModeValidator is a validator for the "source_mode" field enum values. It is called by the builders before save. +func SourceModeValidator(sm SourceMode) error { + switch sm { + case SourceModeDirect, SourceModeDockerExec: + return nil + default: + return fmt.Errorf("backupsetting: invalid enum value for source_mode field: %q", sm) + } +} + +// OrderOption defines the ordering options for the BackupSetting queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// BySourceMode orders the results by the source_mode field. +func BySourceMode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceMode, opts...).ToFunc() +} + +// ByBackupRoot orders the results by the backup_root field. +func ByBackupRoot(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBackupRoot, opts...).ToFunc() +} + +// ByRetentionDays orders the results by the retention_days field. +func ByRetentionDays(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRetentionDays, opts...).ToFunc() +} + +// ByKeepLast orders the results by the keep_last field. +func ByKeepLast(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldKeepLast, opts...).ToFunc() +} + +// BySqlitePath orders the results by the sqlite_path field. +func BySqlitePath(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSqlitePath, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. 
+func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} diff --git a/backup/ent/backupsetting/where.go b/backup/ent/backupsetting/where.go new file mode 100644 index 000000000..29d5a860b --- /dev/null +++ b/backup/ent/backupsetting/where.go @@ -0,0 +1,410 @@ +// Code generated by ent, DO NOT EDIT. + +package backupsetting + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. 
+func IDLT(id int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLTE(FieldID, id)) +} + +// BackupRoot applies equality check predicate on the "backup_root" field. It's identical to BackupRootEQ. +func BackupRoot(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldBackupRoot, v)) +} + +// RetentionDays applies equality check predicate on the "retention_days" field. It's identical to RetentionDaysEQ. +func RetentionDays(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldRetentionDays, v)) +} + +// KeepLast applies equality check predicate on the "keep_last" field. It's identical to KeepLastEQ. +func KeepLast(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldKeepLast, v)) +} + +// SqlitePath applies equality check predicate on the "sqlite_path" field. It's identical to SqlitePathEQ. +func SqlitePath(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldSqlitePath, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// SourceModeEQ applies the EQ predicate on the "source_mode" field. +func SourceModeEQ(v SourceMode) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldSourceMode, v)) +} + +// SourceModeNEQ applies the NEQ predicate on the "source_mode" field. 
+func SourceModeNEQ(v SourceMode) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNEQ(FieldSourceMode, v)) +} + +// SourceModeIn applies the In predicate on the "source_mode" field. +func SourceModeIn(vs ...SourceMode) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldIn(FieldSourceMode, vs...)) +} + +// SourceModeNotIn applies the NotIn predicate on the "source_mode" field. +func SourceModeNotIn(vs ...SourceMode) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNotIn(FieldSourceMode, vs...)) +} + +// BackupRootEQ applies the EQ predicate on the "backup_root" field. +func BackupRootEQ(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldBackupRoot, v)) +} + +// BackupRootNEQ applies the NEQ predicate on the "backup_root" field. +func BackupRootNEQ(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNEQ(FieldBackupRoot, v)) +} + +// BackupRootIn applies the In predicate on the "backup_root" field. +func BackupRootIn(vs ...string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldIn(FieldBackupRoot, vs...)) +} + +// BackupRootNotIn applies the NotIn predicate on the "backup_root" field. +func BackupRootNotIn(vs ...string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNotIn(FieldBackupRoot, vs...)) +} + +// BackupRootGT applies the GT predicate on the "backup_root" field. +func BackupRootGT(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGT(FieldBackupRoot, v)) +} + +// BackupRootGTE applies the GTE predicate on the "backup_root" field. +func BackupRootGTE(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGTE(FieldBackupRoot, v)) +} + +// BackupRootLT applies the LT predicate on the "backup_root" field. 
+func BackupRootLT(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLT(FieldBackupRoot, v)) +} + +// BackupRootLTE applies the LTE predicate on the "backup_root" field. +func BackupRootLTE(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLTE(FieldBackupRoot, v)) +} + +// BackupRootContains applies the Contains predicate on the "backup_root" field. +func BackupRootContains(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldContains(FieldBackupRoot, v)) +} + +// BackupRootHasPrefix applies the HasPrefix predicate on the "backup_root" field. +func BackupRootHasPrefix(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldHasPrefix(FieldBackupRoot, v)) +} + +// BackupRootHasSuffix applies the HasSuffix predicate on the "backup_root" field. +func BackupRootHasSuffix(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldHasSuffix(FieldBackupRoot, v)) +} + +// BackupRootEqualFold applies the EqualFold predicate on the "backup_root" field. +func BackupRootEqualFold(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEqualFold(FieldBackupRoot, v)) +} + +// BackupRootContainsFold applies the ContainsFold predicate on the "backup_root" field. +func BackupRootContainsFold(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldContainsFold(FieldBackupRoot, v)) +} + +// RetentionDaysEQ applies the EQ predicate on the "retention_days" field. +func RetentionDaysEQ(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldRetentionDays, v)) +} + +// RetentionDaysNEQ applies the NEQ predicate on the "retention_days" field. +func RetentionDaysNEQ(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNEQ(FieldRetentionDays, v)) +} + +// RetentionDaysIn applies the In predicate on the "retention_days" field. 
+func RetentionDaysIn(vs ...int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldIn(FieldRetentionDays, vs...)) +} + +// RetentionDaysNotIn applies the NotIn predicate on the "retention_days" field. +func RetentionDaysNotIn(vs ...int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNotIn(FieldRetentionDays, vs...)) +} + +// RetentionDaysGT applies the GT predicate on the "retention_days" field. +func RetentionDaysGT(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGT(FieldRetentionDays, v)) +} + +// RetentionDaysGTE applies the GTE predicate on the "retention_days" field. +func RetentionDaysGTE(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGTE(FieldRetentionDays, v)) +} + +// RetentionDaysLT applies the LT predicate on the "retention_days" field. +func RetentionDaysLT(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLT(FieldRetentionDays, v)) +} + +// RetentionDaysLTE applies the LTE predicate on the "retention_days" field. +func RetentionDaysLTE(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLTE(FieldRetentionDays, v)) +} + +// KeepLastEQ applies the EQ predicate on the "keep_last" field. +func KeepLastEQ(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldKeepLast, v)) +} + +// KeepLastNEQ applies the NEQ predicate on the "keep_last" field. +func KeepLastNEQ(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNEQ(FieldKeepLast, v)) +} + +// KeepLastIn applies the In predicate on the "keep_last" field. +func KeepLastIn(vs ...int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldIn(FieldKeepLast, vs...)) +} + +// KeepLastNotIn applies the NotIn predicate on the "keep_last" field. 
+func KeepLastNotIn(vs ...int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNotIn(FieldKeepLast, vs...)) +} + +// KeepLastGT applies the GT predicate on the "keep_last" field. +func KeepLastGT(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGT(FieldKeepLast, v)) +} + +// KeepLastGTE applies the GTE predicate on the "keep_last" field. +func KeepLastGTE(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGTE(FieldKeepLast, v)) +} + +// KeepLastLT applies the LT predicate on the "keep_last" field. +func KeepLastLT(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLT(FieldKeepLast, v)) +} + +// KeepLastLTE applies the LTE predicate on the "keep_last" field. +func KeepLastLTE(v int) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLTE(FieldKeepLast, v)) +} + +// SqlitePathEQ applies the EQ predicate on the "sqlite_path" field. +func SqlitePathEQ(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldSqlitePath, v)) +} + +// SqlitePathNEQ applies the NEQ predicate on the "sqlite_path" field. +func SqlitePathNEQ(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNEQ(FieldSqlitePath, v)) +} + +// SqlitePathIn applies the In predicate on the "sqlite_path" field. +func SqlitePathIn(vs ...string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldIn(FieldSqlitePath, vs...)) +} + +// SqlitePathNotIn applies the NotIn predicate on the "sqlite_path" field. +func SqlitePathNotIn(vs ...string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNotIn(FieldSqlitePath, vs...)) +} + +// SqlitePathGT applies the GT predicate on the "sqlite_path" field. +func SqlitePathGT(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGT(FieldSqlitePath, v)) +} + +// SqlitePathGTE applies the GTE predicate on the "sqlite_path" field. 
+func SqlitePathGTE(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGTE(FieldSqlitePath, v)) +} + +// SqlitePathLT applies the LT predicate on the "sqlite_path" field. +func SqlitePathLT(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLT(FieldSqlitePath, v)) +} + +// SqlitePathLTE applies the LTE predicate on the "sqlite_path" field. +func SqlitePathLTE(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLTE(FieldSqlitePath, v)) +} + +// SqlitePathContains applies the Contains predicate on the "sqlite_path" field. +func SqlitePathContains(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldContains(FieldSqlitePath, v)) +} + +// SqlitePathHasPrefix applies the HasPrefix predicate on the "sqlite_path" field. +func SqlitePathHasPrefix(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldHasPrefix(FieldSqlitePath, v)) +} + +// SqlitePathHasSuffix applies the HasSuffix predicate on the "sqlite_path" field. +func SqlitePathHasSuffix(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldHasSuffix(FieldSqlitePath, v)) +} + +// SqlitePathEqualFold applies the EqualFold predicate on the "sqlite_path" field. +func SqlitePathEqualFold(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEqualFold(FieldSqlitePath, v)) +} + +// SqlitePathContainsFold applies the ContainsFold predicate on the "sqlite_path" field. +func SqlitePathContainsFold(v string) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldContainsFold(FieldSqlitePath, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. 
+func CreatedAtNEQ(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. 
+func UpdatedAtIn(vs ...time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.BackupSetting { + return predicate.BackupSetting(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.BackupSetting) predicate.BackupSetting { + return predicate.BackupSetting(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.BackupSetting) predicate.BackupSetting { + return predicate.BackupSetting(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.BackupSetting) predicate.BackupSetting { + return predicate.BackupSetting(sql.NotPredicates(p)) +} diff --git a/backup/ent/backupsetting_create.go b/backup/ent/backupsetting_create.go new file mode 100644 index 000000000..736b7d5cf --- /dev/null +++ b/backup/ent/backupsetting_create.go @@ -0,0 +1,357 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" +) + +// BackupSettingCreate is the builder for creating a BackupSetting entity. +type BackupSettingCreate struct { + config + mutation *BackupSettingMutation + hooks []Hook +} + +// SetSourceMode sets the "source_mode" field. +func (_c *BackupSettingCreate) SetSourceMode(v backupsetting.SourceMode) *BackupSettingCreate { + _c.mutation.SetSourceMode(v) + return _c +} + +// SetNillableSourceMode sets the "source_mode" field if the given value is not nil. +func (_c *BackupSettingCreate) SetNillableSourceMode(v *backupsetting.SourceMode) *BackupSettingCreate { + if v != nil { + _c.SetSourceMode(*v) + } + return _c +} + +// SetBackupRoot sets the "backup_root" field. +func (_c *BackupSettingCreate) SetBackupRoot(v string) *BackupSettingCreate { + _c.mutation.SetBackupRoot(v) + return _c +} + +// SetNillableBackupRoot sets the "backup_root" field if the given value is not nil. +func (_c *BackupSettingCreate) SetNillableBackupRoot(v *string) *BackupSettingCreate { + if v != nil { + _c.SetBackupRoot(*v) + } + return _c +} + +// SetRetentionDays sets the "retention_days" field. +func (_c *BackupSettingCreate) SetRetentionDays(v int) *BackupSettingCreate { + _c.mutation.SetRetentionDays(v) + return _c +} + +// SetNillableRetentionDays sets the "retention_days" field if the given value is not nil. +func (_c *BackupSettingCreate) SetNillableRetentionDays(v *int) *BackupSettingCreate { + if v != nil { + _c.SetRetentionDays(*v) + } + return _c +} + +// SetKeepLast sets the "keep_last" field. +func (_c *BackupSettingCreate) SetKeepLast(v int) *BackupSettingCreate { + _c.mutation.SetKeepLast(v) + return _c +} + +// SetNillableKeepLast sets the "keep_last" field if the given value is not nil. 
+func (_c *BackupSettingCreate) SetNillableKeepLast(v *int) *BackupSettingCreate { + if v != nil { + _c.SetKeepLast(*v) + } + return _c +} + +// SetSqlitePath sets the "sqlite_path" field. +func (_c *BackupSettingCreate) SetSqlitePath(v string) *BackupSettingCreate { + _c.mutation.SetSqlitePath(v) + return _c +} + +// SetNillableSqlitePath sets the "sqlite_path" field if the given value is not nil. +func (_c *BackupSettingCreate) SetNillableSqlitePath(v *string) *BackupSettingCreate { + if v != nil { + _c.SetSqlitePath(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *BackupSettingCreate) SetCreatedAt(v time.Time) *BackupSettingCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *BackupSettingCreate) SetNillableCreatedAt(v *time.Time) *BackupSettingCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *BackupSettingCreate) SetUpdatedAt(v time.Time) *BackupSettingCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *BackupSettingCreate) SetNillableUpdatedAt(v *time.Time) *BackupSettingCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// Mutation returns the BackupSettingMutation object of the builder. +func (_c *BackupSettingCreate) Mutation() *BackupSettingMutation { + return _c.mutation +} + +// Save creates the BackupSetting in the database. +func (_c *BackupSettingCreate) Save(ctx context.Context) (*BackupSetting, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *BackupSettingCreate) SaveX(ctx context.Context) *BackupSetting { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *BackupSettingCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *BackupSettingCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *BackupSettingCreate) defaults() { + if _, ok := _c.mutation.SourceMode(); !ok { + v := backupsetting.DefaultSourceMode + _c.mutation.SetSourceMode(v) + } + if _, ok := _c.mutation.BackupRoot(); !ok { + v := backupsetting.DefaultBackupRoot + _c.mutation.SetBackupRoot(v) + } + if _, ok := _c.mutation.RetentionDays(); !ok { + v := backupsetting.DefaultRetentionDays + _c.mutation.SetRetentionDays(v) + } + if _, ok := _c.mutation.KeepLast(); !ok { + v := backupsetting.DefaultKeepLast + _c.mutation.SetKeepLast(v) + } + if _, ok := _c.mutation.SqlitePath(); !ok { + v := backupsetting.DefaultSqlitePath + _c.mutation.SetSqlitePath(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := backupsetting.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := backupsetting.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *BackupSettingCreate) check() error { + if _, ok := _c.mutation.SourceMode(); !ok { + return &ValidationError{Name: "source_mode", err: errors.New(`ent: missing required field "BackupSetting.source_mode"`)} + } + if v, ok := _c.mutation.SourceMode(); ok { + if err := backupsetting.SourceModeValidator(v); err != nil { + return &ValidationError{Name: "source_mode", err: fmt.Errorf(`ent: validator failed for field "BackupSetting.source_mode": %w`, err)} + } + } + if _, ok := _c.mutation.BackupRoot(); !ok { + return &ValidationError{Name: "backup_root", err: errors.New(`ent: missing required field "BackupSetting.backup_root"`)} + } + if _, ok := _c.mutation.RetentionDays(); !ok { + return &ValidationError{Name: "retention_days", err: errors.New(`ent: missing required field "BackupSetting.retention_days"`)} + } + if _, ok := _c.mutation.KeepLast(); !ok { + return &ValidationError{Name: "keep_last", err: errors.New(`ent: missing required field "BackupSetting.keep_last"`)} + } + if _, ok := _c.mutation.SqlitePath(); !ok { + return &ValidationError{Name: "sqlite_path", err: errors.New(`ent: missing required field "BackupSetting.sqlite_path"`)} + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "BackupSetting.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "BackupSetting.updated_at"`)} + } + return nil +} + +func (_c *BackupSettingCreate) sqlSave(ctx context.Context) (*BackupSetting, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + _c.mutation.id = &_node.ID + _c.mutation.done = 
true + return _node, nil +} + +func (_c *BackupSettingCreate) createSpec() (*BackupSetting, *sqlgraph.CreateSpec) { + var ( + _node = &BackupSetting{config: _c.config} + _spec = sqlgraph.NewCreateSpec(backupsetting.Table, sqlgraph.NewFieldSpec(backupsetting.FieldID, field.TypeInt)) + ) + if value, ok := _c.mutation.SourceMode(); ok { + _spec.SetField(backupsetting.FieldSourceMode, field.TypeEnum, value) + _node.SourceMode = value + } + if value, ok := _c.mutation.BackupRoot(); ok { + _spec.SetField(backupsetting.FieldBackupRoot, field.TypeString, value) + _node.BackupRoot = value + } + if value, ok := _c.mutation.RetentionDays(); ok { + _spec.SetField(backupsetting.FieldRetentionDays, field.TypeInt, value) + _node.RetentionDays = value + } + if value, ok := _c.mutation.KeepLast(); ok { + _spec.SetField(backupsetting.FieldKeepLast, field.TypeInt, value) + _node.KeepLast = value + } + if value, ok := _c.mutation.SqlitePath(); ok { + _spec.SetField(backupsetting.FieldSqlitePath, field.TypeString, value) + _node.SqlitePath = value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(backupsetting.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(backupsetting.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + return _node, _spec +} + +// BackupSettingCreateBulk is the builder for creating many BackupSetting entities in bulk. +type BackupSettingCreateBulk struct { + config + err error + builders []*BackupSettingCreate +} + +// Save creates the BackupSetting entities in the database. 
+func (_c *BackupSettingCreateBulk) Save(ctx context.Context) ([]*BackupSetting, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*BackupSetting, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BackupSettingMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *BackupSettingCreateBulk) SaveX(ctx context.Context) []*BackupSetting { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *BackupSettingCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *BackupSettingCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backup/ent/backupsetting_delete.go b/backup/ent/backupsetting_delete.go new file mode 100644 index 000000000..c672e6f1c --- /dev/null +++ b/backup/ent/backupsetting_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupSettingDelete is the builder for deleting a BackupSetting entity. +type BackupSettingDelete struct { + config + hooks []Hook + mutation *BackupSettingMutation +} + +// Where appends a list predicates to the BackupSettingDelete builder. +func (_d *BackupSettingDelete) Where(ps ...predicate.BackupSetting) *BackupSettingDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *BackupSettingDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *BackupSettingDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *BackupSettingDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(backupsetting.Table, sqlgraph.NewFieldSpec(backupsetting.FieldID, field.TypeInt)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// BackupSettingDeleteOne is the builder for deleting a single BackupSetting entity. +type BackupSettingDeleteOne struct { + _d *BackupSettingDelete +} + +// Where appends a list predicates to the BackupSettingDelete builder. +func (_d *BackupSettingDeleteOne) Where(ps ...predicate.BackupSetting) *BackupSettingDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *BackupSettingDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{backupsetting.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *BackupSettingDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backup/ent/backupsetting_query.go b/backup/ent/backupsetting_query.go new file mode 100644 index 000000000..6e4f75249 --- /dev/null +++ b/backup/ent/backupsetting_query.go @@ -0,0 +1,527 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupSettingQuery is the builder for querying BackupSetting entities. +type BackupSettingQuery struct { + config + ctx *QueryContext + order []backupsetting.OrderOption + inters []Interceptor + predicates []predicate.BackupSetting + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the BackupSettingQuery builder. +func (_q *BackupSettingQuery) Where(ps ...predicate.BackupSetting) *BackupSettingQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *BackupSettingQuery) Limit(limit int) *BackupSettingQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *BackupSettingQuery) Offset(offset int) *BackupSettingQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *BackupSettingQuery) Unique(unique bool) *BackupSettingQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *BackupSettingQuery) Order(o ...backupsetting.OrderOption) *BackupSettingQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first BackupSetting entity from the query. +// Returns a *NotFoundError when no BackupSetting was found. 
+func (_q *BackupSettingQuery) First(ctx context.Context) (*BackupSetting, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{backupsetting.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *BackupSettingQuery) FirstX(ctx context.Context) *BackupSetting { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first BackupSetting ID from the query. +// Returns a *NotFoundError when no BackupSetting ID was found. +func (_q *BackupSettingQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{backupsetting.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *BackupSettingQuery) FirstIDX(ctx context.Context) int { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single BackupSetting entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one BackupSetting entity is found. +// Returns a *NotFoundError when no BackupSetting entities are found. +func (_q *BackupSettingQuery) Only(ctx context.Context) (*BackupSetting, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{backupsetting.Label} + default: + return nil, &NotSingularError{backupsetting.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
+func (_q *BackupSettingQuery) OnlyX(ctx context.Context) *BackupSetting { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only BackupSetting ID in the query. +// Returns a *NotSingularError when more than one BackupSetting ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *BackupSettingQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{backupsetting.Label} + default: + err = &NotSingularError{backupsetting.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *BackupSettingQuery) OnlyIDX(ctx context.Context) int { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of BackupSettings. +func (_q *BackupSettingQuery) All(ctx context.Context) ([]*BackupSetting, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*BackupSetting, *BackupSettingQuery]() + return withInterceptors[[]*BackupSetting](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *BackupSettingQuery) AllX(ctx context.Context) []*BackupSetting { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of BackupSetting IDs. 
+func (_q *BackupSettingQuery) IDs(ctx context.Context) (ids []int, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(backupsetting.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *BackupSettingQuery) IDsX(ctx context.Context) []int { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *BackupSettingQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*BackupSettingQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *BackupSettingQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *BackupSettingQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *BackupSettingQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the BackupSettingQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
+func (_q *BackupSettingQuery) Clone() *BackupSettingQuery { + if _q == nil { + return nil + } + return &BackupSettingQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]backupsetting.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.BackupSetting{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// SourceMode backupsetting.SourceMode `json:"source_mode,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.BackupSetting.Query(). +// GroupBy(backupsetting.FieldSourceMode). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *BackupSettingQuery) GroupBy(field string, fields ...string) *BackupSettingGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &BackupSettingGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = backupsetting.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// SourceMode backupsetting.SourceMode `json:"source_mode,omitempty"` +// } +// +// client.BackupSetting.Query(). +// Select(backupsetting.FieldSourceMode). +// Scan(ctx, &v) +func (_q *BackupSettingQuery) Select(fields ...string) *BackupSettingSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &BackupSettingSelect{BackupSettingQuery: _q} + sbuild.label = backupsetting.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a BackupSettingSelect configured with the given aggregations. 
+func (_q *BackupSettingQuery) Aggregate(fns ...AggregateFunc) *BackupSettingSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *BackupSettingQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !backupsetting.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *BackupSettingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BackupSetting, error) { + var ( + nodes = []*BackupSetting{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*BackupSetting).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &BackupSetting{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *BackupSettingQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *BackupSettingQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(backupsetting.Table, backupsetting.Columns, sqlgraph.NewFieldSpec(backupsetting.FieldID, field.TypeInt)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil 
{ + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, backupsetting.FieldID) + for i := range fields { + if fields[i] != backupsetting.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *BackupSettingQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(backupsetting.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = backupsetting.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// BackupSettingGroupBy is the group-by builder for BackupSetting entities. +type BackupSettingGroupBy struct { + selector + build *BackupSettingQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. 
+func (_g *BackupSettingGroupBy) Aggregate(fns ...AggregateFunc) *BackupSettingGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *BackupSettingGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*BackupSettingQuery, *BackupSettingGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *BackupSettingGroupBy) sqlScan(ctx context.Context, root *BackupSettingQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// BackupSettingSelect is the builder for selecting fields of BackupSetting entities. +type BackupSettingSelect struct { + *BackupSettingQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *BackupSettingSelect) Aggregate(fns ...AggregateFunc) *BackupSettingSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *BackupSettingSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*BackupSettingQuery, *BackupSettingSelect](ctx, _s.BackupSettingQuery, _s, _s.inters, v) +} + +func (_s *BackupSettingSelect) sqlScan(ctx context.Context, root *BackupSettingQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backup/ent/backupsetting_update.go b/backup/ent/backupsetting_update.go new file mode 100644 index 000000000..04b5af80f --- /dev/null +++ b/backup/ent/backupsetting_update.go @@ -0,0 +1,448 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupSettingUpdate is the builder for updating BackupSetting entities. +type BackupSettingUpdate struct { + config + hooks []Hook + mutation *BackupSettingMutation +} + +// Where appends a list predicates to the BackupSettingUpdate builder. +func (_u *BackupSettingUpdate) Where(ps ...predicate.BackupSetting) *BackupSettingUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetSourceMode sets the "source_mode" field. 
+func (_u *BackupSettingUpdate) SetSourceMode(v backupsetting.SourceMode) *BackupSettingUpdate { + _u.mutation.SetSourceMode(v) + return _u +} + +// SetNillableSourceMode sets the "source_mode" field if the given value is not nil. +func (_u *BackupSettingUpdate) SetNillableSourceMode(v *backupsetting.SourceMode) *BackupSettingUpdate { + if v != nil { + _u.SetSourceMode(*v) + } + return _u +} + +// SetBackupRoot sets the "backup_root" field. +func (_u *BackupSettingUpdate) SetBackupRoot(v string) *BackupSettingUpdate { + _u.mutation.SetBackupRoot(v) + return _u +} + +// SetNillableBackupRoot sets the "backup_root" field if the given value is not nil. +func (_u *BackupSettingUpdate) SetNillableBackupRoot(v *string) *BackupSettingUpdate { + if v != nil { + _u.SetBackupRoot(*v) + } + return _u +} + +// SetRetentionDays sets the "retention_days" field. +func (_u *BackupSettingUpdate) SetRetentionDays(v int) *BackupSettingUpdate { + _u.mutation.ResetRetentionDays() + _u.mutation.SetRetentionDays(v) + return _u +} + +// SetNillableRetentionDays sets the "retention_days" field if the given value is not nil. +func (_u *BackupSettingUpdate) SetNillableRetentionDays(v *int) *BackupSettingUpdate { + if v != nil { + _u.SetRetentionDays(*v) + } + return _u +} + +// AddRetentionDays adds value to the "retention_days" field. +func (_u *BackupSettingUpdate) AddRetentionDays(v int) *BackupSettingUpdate { + _u.mutation.AddRetentionDays(v) + return _u +} + +// SetKeepLast sets the "keep_last" field. +func (_u *BackupSettingUpdate) SetKeepLast(v int) *BackupSettingUpdate { + _u.mutation.ResetKeepLast() + _u.mutation.SetKeepLast(v) + return _u +} + +// SetNillableKeepLast sets the "keep_last" field if the given value is not nil. +func (_u *BackupSettingUpdate) SetNillableKeepLast(v *int) *BackupSettingUpdate { + if v != nil { + _u.SetKeepLast(*v) + } + return _u +} + +// AddKeepLast adds value to the "keep_last" field. 
+func (_u *BackupSettingUpdate) AddKeepLast(v int) *BackupSettingUpdate { + _u.mutation.AddKeepLast(v) + return _u +} + +// SetSqlitePath sets the "sqlite_path" field. +func (_u *BackupSettingUpdate) SetSqlitePath(v string) *BackupSettingUpdate { + _u.mutation.SetSqlitePath(v) + return _u +} + +// SetNillableSqlitePath sets the "sqlite_path" field if the given value is not nil. +func (_u *BackupSettingUpdate) SetNillableSqlitePath(v *string) *BackupSettingUpdate { + if v != nil { + _u.SetSqlitePath(*v) + } + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *BackupSettingUpdate) SetUpdatedAt(v time.Time) *BackupSettingUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// Mutation returns the BackupSettingMutation object of the builder. +func (_u *BackupSettingUpdate) Mutation() *BackupSettingMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *BackupSettingUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *BackupSettingUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *BackupSettingUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *BackupSettingUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *BackupSettingUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := backupsetting.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *BackupSettingUpdate) check() error { + if v, ok := _u.mutation.SourceMode(); ok { + if err := backupsetting.SourceModeValidator(v); err != nil { + return &ValidationError{Name: "source_mode", err: fmt.Errorf(`ent: validator failed for field "BackupSetting.source_mode": %w`, err)} + } + } + return nil +} + +func (_u *BackupSettingUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(backupsetting.Table, backupsetting.Columns, sqlgraph.NewFieldSpec(backupsetting.FieldID, field.TypeInt)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.SourceMode(); ok { + _spec.SetField(backupsetting.FieldSourceMode, field.TypeEnum, value) + } + if value, ok := _u.mutation.BackupRoot(); ok { + _spec.SetField(backupsetting.FieldBackupRoot, field.TypeString, value) + } + if value, ok := _u.mutation.RetentionDays(); ok { + _spec.SetField(backupsetting.FieldRetentionDays, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedRetentionDays(); ok { + _spec.AddField(backupsetting.FieldRetentionDays, field.TypeInt, value) + } + if value, ok := _u.mutation.KeepLast(); ok { + _spec.SetField(backupsetting.FieldKeepLast, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedKeepLast(); ok { + _spec.AddField(backupsetting.FieldKeepLast, field.TypeInt, value) + } + if value, ok := _u.mutation.SqlitePath(); ok { + _spec.SetField(backupsetting.FieldSqlitePath, field.TypeString, value) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(backupsetting.FieldUpdatedAt, field.TypeTime, value) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{backupsetting.Label} + } else if sqlgraph.IsConstraintError(err) { + err = 
&ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// BackupSettingUpdateOne is the builder for updating a single BackupSetting entity. +type BackupSettingUpdateOne struct { + config + fields []string + hooks []Hook + mutation *BackupSettingMutation +} + +// SetSourceMode sets the "source_mode" field. +func (_u *BackupSettingUpdateOne) SetSourceMode(v backupsetting.SourceMode) *BackupSettingUpdateOne { + _u.mutation.SetSourceMode(v) + return _u +} + +// SetNillableSourceMode sets the "source_mode" field if the given value is not nil. +func (_u *BackupSettingUpdateOne) SetNillableSourceMode(v *backupsetting.SourceMode) *BackupSettingUpdateOne { + if v != nil { + _u.SetSourceMode(*v) + } + return _u +} + +// SetBackupRoot sets the "backup_root" field. +func (_u *BackupSettingUpdateOne) SetBackupRoot(v string) *BackupSettingUpdateOne { + _u.mutation.SetBackupRoot(v) + return _u +} + +// SetNillableBackupRoot sets the "backup_root" field if the given value is not nil. +func (_u *BackupSettingUpdateOne) SetNillableBackupRoot(v *string) *BackupSettingUpdateOne { + if v != nil { + _u.SetBackupRoot(*v) + } + return _u +} + +// SetRetentionDays sets the "retention_days" field. +func (_u *BackupSettingUpdateOne) SetRetentionDays(v int) *BackupSettingUpdateOne { + _u.mutation.ResetRetentionDays() + _u.mutation.SetRetentionDays(v) + return _u +} + +// SetNillableRetentionDays sets the "retention_days" field if the given value is not nil. +func (_u *BackupSettingUpdateOne) SetNillableRetentionDays(v *int) *BackupSettingUpdateOne { + if v != nil { + _u.SetRetentionDays(*v) + } + return _u +} + +// AddRetentionDays adds value to the "retention_days" field. +func (_u *BackupSettingUpdateOne) AddRetentionDays(v int) *BackupSettingUpdateOne { + _u.mutation.AddRetentionDays(v) + return _u +} + +// SetKeepLast sets the "keep_last" field. 
+func (_u *BackupSettingUpdateOne) SetKeepLast(v int) *BackupSettingUpdateOne { + _u.mutation.ResetKeepLast() + _u.mutation.SetKeepLast(v) + return _u +} + +// SetNillableKeepLast sets the "keep_last" field if the given value is not nil. +func (_u *BackupSettingUpdateOne) SetNillableKeepLast(v *int) *BackupSettingUpdateOne { + if v != nil { + _u.SetKeepLast(*v) + } + return _u +} + +// AddKeepLast adds value to the "keep_last" field. +func (_u *BackupSettingUpdateOne) AddKeepLast(v int) *BackupSettingUpdateOne { + _u.mutation.AddKeepLast(v) + return _u +} + +// SetSqlitePath sets the "sqlite_path" field. +func (_u *BackupSettingUpdateOne) SetSqlitePath(v string) *BackupSettingUpdateOne { + _u.mutation.SetSqlitePath(v) + return _u +} + +// SetNillableSqlitePath sets the "sqlite_path" field if the given value is not nil. +func (_u *BackupSettingUpdateOne) SetNillableSqlitePath(v *string) *BackupSettingUpdateOne { + if v != nil { + _u.SetSqlitePath(*v) + } + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *BackupSettingUpdateOne) SetUpdatedAt(v time.Time) *BackupSettingUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// Mutation returns the BackupSettingMutation object of the builder. +func (_u *BackupSettingUpdateOne) Mutation() *BackupSettingMutation { + return _u.mutation +} + +// Where appends a list predicates to the BackupSettingUpdate builder. +func (_u *BackupSettingUpdateOne) Where(ps ...predicate.BackupSetting) *BackupSettingUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *BackupSettingUpdateOne) Select(field string, fields ...string) *BackupSettingUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated BackupSetting entity. 
+func (_u *BackupSettingUpdateOne) Save(ctx context.Context) (*BackupSetting, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *BackupSettingUpdateOne) SaveX(ctx context.Context) *BackupSetting { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *BackupSettingUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *BackupSettingUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *BackupSettingUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := backupsetting.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *BackupSettingUpdateOne) check() error { + if v, ok := _u.mutation.SourceMode(); ok { + if err := backupsetting.SourceModeValidator(v); err != nil { + return &ValidationError{Name: "source_mode", err: fmt.Errorf(`ent: validator failed for field "BackupSetting.source_mode": %w`, err)} + } + } + return nil +} + +func (_u *BackupSettingUpdateOne) sqlSave(ctx context.Context) (_node *BackupSetting, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(backupsetting.Table, backupsetting.Columns, sqlgraph.NewFieldSpec(backupsetting.FieldID, field.TypeInt)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BackupSetting.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, backupsetting.FieldID) + for _, f := range fields { + if !backupsetting.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != backupsetting.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.SourceMode(); ok { + _spec.SetField(backupsetting.FieldSourceMode, field.TypeEnum, value) + } + if value, ok := _u.mutation.BackupRoot(); ok { + _spec.SetField(backupsetting.FieldBackupRoot, field.TypeString, value) + } + if value, ok := _u.mutation.RetentionDays(); ok { + _spec.SetField(backupsetting.FieldRetentionDays, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedRetentionDays(); ok { + _spec.AddField(backupsetting.FieldRetentionDays, field.TypeInt, value) + } + if value, ok := _u.mutation.KeepLast(); ok { + _spec.SetField(backupsetting.FieldKeepLast, 
field.TypeInt, value) + } + if value, ok := _u.mutation.AddedKeepLast(); ok { + _spec.AddField(backupsetting.FieldKeepLast, field.TypeInt, value) + } + if value, ok := _u.mutation.SqlitePath(); ok { + _spec.SetField(backupsetting.FieldSqlitePath, field.TypeString, value) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(backupsetting.FieldUpdatedAt, field.TypeTime, value) + } + _node = &BackupSetting{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{backupsetting.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backup/ent/backupsourceconfig.go b/backup/ent/backupsourceconfig.go new file mode 100644 index 000000000..705f66b7c --- /dev/null +++ b/backup/ent/backupsourceconfig.go @@ -0,0 +1,232 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" +) + +// BackupSourceConfig is the model entity for the BackupSourceConfig schema. +type BackupSourceConfig struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // SourceType holds the value of the "source_type" field. + SourceType backupsourceconfig.SourceType `json:"source_type,omitempty"` + // Host holds the value of the "host" field. + Host string `json:"host,omitempty"` + // Port holds the value of the "port" field. + Port *int `json:"port,omitempty"` + // Username holds the value of the "username" field. + Username string `json:"username,omitempty"` + // PasswordEncrypted holds the value of the "password_encrypted" field. 
+ PasswordEncrypted string `json:"-"` + // Database holds the value of the "database" field. + Database string `json:"database,omitempty"` + // SslMode holds the value of the "ssl_mode" field. + SslMode string `json:"ssl_mode,omitempty"` + // Addr holds the value of the "addr" field. + Addr string `json:"addr,omitempty"` + // RedisDb holds the value of the "redis_db" field. + RedisDb *int `json:"redis_db,omitempty"` + // ContainerName holds the value of the "container_name" field. + ContainerName string `json:"container_name,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*BackupSourceConfig) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case backupsourceconfig.FieldID, backupsourceconfig.FieldPort, backupsourceconfig.FieldRedisDb: + values[i] = new(sql.NullInt64) + case backupsourceconfig.FieldSourceType, backupsourceconfig.FieldHost, backupsourceconfig.FieldUsername, backupsourceconfig.FieldPasswordEncrypted, backupsourceconfig.FieldDatabase, backupsourceconfig.FieldSslMode, backupsourceconfig.FieldAddr, backupsourceconfig.FieldContainerName: + values[i] = new(sql.NullString) + case backupsourceconfig.FieldCreatedAt, backupsourceconfig.FieldUpdatedAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the BackupSourceConfig fields. 
+func (_m *BackupSourceConfig) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case backupsourceconfig.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int(value.Int64) + case backupsourceconfig.FieldSourceType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field source_type", values[i]) + } else if value.Valid { + _m.SourceType = backupsourceconfig.SourceType(value.String) + } + case backupsourceconfig.FieldHost: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field host", values[i]) + } else if value.Valid { + _m.Host = value.String + } + case backupsourceconfig.FieldPort: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field port", values[i]) + } else if value.Valid { + _m.Port = new(int) + *_m.Port = int(value.Int64) + } + case backupsourceconfig.FieldUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field username", values[i]) + } else if value.Valid { + _m.Username = value.String + } + case backupsourceconfig.FieldPasswordEncrypted: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field password_encrypted", values[i]) + } else if value.Valid { + _m.PasswordEncrypted = value.String + } + case backupsourceconfig.FieldDatabase: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field database", values[i]) + } else if value.Valid { + _m.Database = value.String + } + case backupsourceconfig.FieldSslMode: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field 
ssl_mode", values[i]) + } else if value.Valid { + _m.SslMode = value.String + } + case backupsourceconfig.FieldAddr: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field addr", values[i]) + } else if value.Valid { + _m.Addr = value.String + } + case backupsourceconfig.FieldRedisDb: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field redis_db", values[i]) + } else if value.Valid { + _m.RedisDb = new(int) + *_m.RedisDb = int(value.Int64) + } + case backupsourceconfig.FieldContainerName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field container_name", values[i]) + } else if value.Valid { + _m.ContainerName = value.String + } + case backupsourceconfig.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case backupsourceconfig.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the BackupSourceConfig. +// This includes values selected through modifiers, order, etc. +func (_m *BackupSourceConfig) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this BackupSourceConfig. +// Note that you need to call BackupSourceConfig.Unwrap() before calling this method if this BackupSourceConfig +// was returned from a transaction, and the transaction was committed or rolled back. 
+func (_m *BackupSourceConfig) Update() *BackupSourceConfigUpdateOne { + return NewBackupSourceConfigClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the BackupSourceConfig entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *BackupSourceConfig) Unwrap() *BackupSourceConfig { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: BackupSourceConfig is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *BackupSourceConfig) String() string { + var builder strings.Builder + builder.WriteString("BackupSourceConfig(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("source_type=") + builder.WriteString(fmt.Sprintf("%v", _m.SourceType)) + builder.WriteString(", ") + builder.WriteString("host=") + builder.WriteString(_m.Host) + builder.WriteString(", ") + if v := _m.Port; v != nil { + builder.WriteString("port=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("username=") + builder.WriteString(_m.Username) + builder.WriteString(", ") + builder.WriteString("password_encrypted=") + builder.WriteString(", ") + builder.WriteString("database=") + builder.WriteString(_m.Database) + builder.WriteString(", ") + builder.WriteString("ssl_mode=") + builder.WriteString(_m.SslMode) + builder.WriteString(", ") + builder.WriteString("addr=") + builder.WriteString(_m.Addr) + builder.WriteString(", ") + if v := _m.RedisDb; v != nil { + builder.WriteString("redis_db=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("container_name=") + builder.WriteString(_m.ContainerName) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + 
builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// BackupSourceConfigs is a parsable slice of BackupSourceConfig. +type BackupSourceConfigs []*BackupSourceConfig diff --git a/backup/ent/backupsourceconfig/backupsourceconfig.go b/backup/ent/backupsourceconfig/backupsourceconfig.go new file mode 100644 index 000000000..b81a56b67 --- /dev/null +++ b/backup/ent/backupsourceconfig/backupsourceconfig.go @@ -0,0 +1,172 @@ +// Code generated by ent, DO NOT EDIT. + +package backupsourceconfig + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the backupsourceconfig type in the database. + Label = "backup_source_config" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldSourceType holds the string denoting the source_type field in the database. + FieldSourceType = "source_type" + // FieldHost holds the string denoting the host field in the database. + FieldHost = "host" + // FieldPort holds the string denoting the port field in the database. + FieldPort = "port" + // FieldUsername holds the string denoting the username field in the database. + FieldUsername = "username" + // FieldPasswordEncrypted holds the string denoting the password_encrypted field in the database. + FieldPasswordEncrypted = "password_encrypted" + // FieldDatabase holds the string denoting the database field in the database. + FieldDatabase = "database" + // FieldSslMode holds the string denoting the ssl_mode field in the database. + FieldSslMode = "ssl_mode" + // FieldAddr holds the string denoting the addr field in the database. + FieldAddr = "addr" + // FieldRedisDb holds the string denoting the redis_db field in the database. + FieldRedisDb = "redis_db" + // FieldContainerName holds the string denoting the container_name field in the database. 
+ FieldContainerName = "container_name" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // Table holds the table name of the backupsourceconfig in the database. + Table = "backup_source_configs" +) + +// Columns holds all SQL columns for backupsourceconfig fields. +var Columns = []string{ + FieldID, + FieldSourceType, + FieldHost, + FieldPort, + FieldUsername, + FieldPasswordEncrypted, + FieldDatabase, + FieldSslMode, + FieldAddr, + FieldRedisDb, + FieldContainerName, + FieldCreatedAt, + FieldUpdatedAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultContainerName holds the default value on creation for the "container_name" field. + DefaultContainerName string + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time +) + +// SourceType defines the type for the "source_type" enum field. +type SourceType string + +// SourceType values. +const ( + SourceTypePostgres SourceType = "postgres" + SourceTypeRedis SourceType = "redis" +) + +func (st SourceType) String() string { + return string(st) +} + +// SourceTypeValidator is a validator for the "source_type" field enum values. It is called by the builders before save. 
+func SourceTypeValidator(st SourceType) error { + switch st { + case SourceTypePostgres, SourceTypeRedis: + return nil + default: + return fmt.Errorf("backupsourceconfig: invalid enum value for source_type field: %q", st) + } +} + +// OrderOption defines the ordering options for the BackupSourceConfig queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// BySourceType orders the results by the source_type field. +func BySourceType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSourceType, opts...).ToFunc() +} + +// ByHost orders the results by the host field. +func ByHost(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHost, opts...).ToFunc() +} + +// ByPort orders the results by the port field. +func ByPort(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPort, opts...).ToFunc() +} + +// ByUsername orders the results by the username field. +func ByUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsername, opts...).ToFunc() +} + +// ByPasswordEncrypted orders the results by the password_encrypted field. +func ByPasswordEncrypted(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPasswordEncrypted, opts...).ToFunc() +} + +// ByDatabase orders the results by the database field. +func ByDatabase(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDatabase, opts...).ToFunc() +} + +// BySslMode orders the results by the ssl_mode field. +func BySslMode(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSslMode, opts...).ToFunc() +} + +// ByAddr orders the results by the addr field. 
+func ByAddr(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldAddr, opts...).ToFunc() +} + +// ByRedisDb orders the results by the redis_db field. +func ByRedisDb(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRedisDb, opts...).ToFunc() +} + +// ByContainerName orders the results by the container_name field. +func ByContainerName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldContainerName, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} diff --git a/backup/ent/backupsourceconfig/where.go b/backup/ent/backupsourceconfig/where.go new file mode 100644 index 000000000..e749601c9 --- /dev/null +++ b/backup/ent/backupsourceconfig/where.go @@ -0,0 +1,840 @@ +// Code generated by ent, DO NOT EDIT. + +package backupsourceconfig + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. 
+func IDIn(ids ...int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldID, id)) +} + +// Host applies equality check predicate on the "host" field. It's identical to HostEQ. +func Host(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldHost, v)) +} + +// Port applies equality check predicate on the "port" field. It's identical to PortEQ. +func Port(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldPort, v)) +} + +// Username applies equality check predicate on the "username" field. It's identical to UsernameEQ. +func Username(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldUsername, v)) +} + +// PasswordEncrypted applies equality check predicate on the "password_encrypted" field. It's identical to PasswordEncryptedEQ. 
+func PasswordEncrypted(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldPasswordEncrypted, v)) +} + +// Database applies equality check predicate on the "database" field. It's identical to DatabaseEQ. +func Database(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldDatabase, v)) +} + +// SslMode applies equality check predicate on the "ssl_mode" field. It's identical to SslModeEQ. +func SslMode(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldSslMode, v)) +} + +// Addr applies equality check predicate on the "addr" field. It's identical to AddrEQ. +func Addr(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldAddr, v)) +} + +// RedisDb applies equality check predicate on the "redis_db" field. It's identical to RedisDbEQ. +func RedisDb(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldRedisDb, v)) +} + +// ContainerName applies equality check predicate on the "container_name" field. It's identical to ContainerNameEQ. +func ContainerName(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldContainerName, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// SourceTypeEQ applies the EQ predicate on the "source_type" field. 
+func SourceTypeEQ(v SourceType) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldSourceType, v)) +} + +// SourceTypeNEQ applies the NEQ predicate on the "source_type" field. +func SourceTypeNEQ(v SourceType) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldSourceType, v)) +} + +// SourceTypeIn applies the In predicate on the "source_type" field. +func SourceTypeIn(vs ...SourceType) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldSourceType, vs...)) +} + +// SourceTypeNotIn applies the NotIn predicate on the "source_type" field. +func SourceTypeNotIn(vs ...SourceType) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldSourceType, vs...)) +} + +// HostEQ applies the EQ predicate on the "host" field. +func HostEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldHost, v)) +} + +// HostNEQ applies the NEQ predicate on the "host" field. +func HostNEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldHost, v)) +} + +// HostIn applies the In predicate on the "host" field. +func HostIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldHost, vs...)) +} + +// HostNotIn applies the NotIn predicate on the "host" field. +func HostNotIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldHost, vs...)) +} + +// HostGT applies the GT predicate on the "host" field. +func HostGT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldHost, v)) +} + +// HostGTE applies the GTE predicate on the "host" field. +func HostGTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldHost, v)) +} + +// HostLT applies the LT predicate on the "host" field. 
+func HostLT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldHost, v)) +} + +// HostLTE applies the LTE predicate on the "host" field. +func HostLTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldHost, v)) +} + +// HostContains applies the Contains predicate on the "host" field. +func HostContains(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContains(FieldHost, v)) +} + +// HostHasPrefix applies the HasPrefix predicate on the "host" field. +func HostHasPrefix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldHost, v)) +} + +// HostHasSuffix applies the HasSuffix predicate on the "host" field. +func HostHasSuffix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldHost, v)) +} + +// HostIsNil applies the IsNil predicate on the "host" field. +func HostIsNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIsNull(FieldHost)) +} + +// HostNotNil applies the NotNil predicate on the "host" field. +func HostNotNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotNull(FieldHost)) +} + +// HostEqualFold applies the EqualFold predicate on the "host" field. +func HostEqualFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldHost, v)) +} + +// HostContainsFold applies the ContainsFold predicate on the "host" field. +func HostContainsFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldHost, v)) +} + +// PortEQ applies the EQ predicate on the "port" field. +func PortEQ(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldPort, v)) +} + +// PortNEQ applies the NEQ predicate on the "port" field. 
+func PortNEQ(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldPort, v)) +} + +// PortIn applies the In predicate on the "port" field. +func PortIn(vs ...int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldPort, vs...)) +} + +// PortNotIn applies the NotIn predicate on the "port" field. +func PortNotIn(vs ...int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldPort, vs...)) +} + +// PortGT applies the GT predicate on the "port" field. +func PortGT(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldPort, v)) +} + +// PortGTE applies the GTE predicate on the "port" field. +func PortGTE(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldPort, v)) +} + +// PortLT applies the LT predicate on the "port" field. +func PortLT(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldPort, v)) +} + +// PortLTE applies the LTE predicate on the "port" field. +func PortLTE(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldPort, v)) +} + +// PortIsNil applies the IsNil predicate on the "port" field. +func PortIsNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIsNull(FieldPort)) +} + +// PortNotNil applies the NotNil predicate on the "port" field. +func PortNotNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotNull(FieldPort)) +} + +// UsernameEQ applies the EQ predicate on the "username" field. +func UsernameEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldUsername, v)) +} + +// UsernameNEQ applies the NEQ predicate on the "username" field. 
+func UsernameNEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldUsername, v)) +} + +// UsernameIn applies the In predicate on the "username" field. +func UsernameIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldUsername, vs...)) +} + +// UsernameNotIn applies the NotIn predicate on the "username" field. +func UsernameNotIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldUsername, vs...)) +} + +// UsernameGT applies the GT predicate on the "username" field. +func UsernameGT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldUsername, v)) +} + +// UsernameGTE applies the GTE predicate on the "username" field. +func UsernameGTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldUsername, v)) +} + +// UsernameLT applies the LT predicate on the "username" field. +func UsernameLT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldUsername, v)) +} + +// UsernameLTE applies the LTE predicate on the "username" field. +func UsernameLTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldUsername, v)) +} + +// UsernameContains applies the Contains predicate on the "username" field. +func UsernameContains(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContains(FieldUsername, v)) +} + +// UsernameHasPrefix applies the HasPrefix predicate on the "username" field. +func UsernameHasPrefix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldUsername, v)) +} + +// UsernameHasSuffix applies the HasSuffix predicate on the "username" field. 
+func UsernameHasSuffix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldUsername, v)) +} + +// UsernameIsNil applies the IsNil predicate on the "username" field. +func UsernameIsNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIsNull(FieldUsername)) +} + +// UsernameNotNil applies the NotNil predicate on the "username" field. +func UsernameNotNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotNull(FieldUsername)) +} + +// UsernameEqualFold applies the EqualFold predicate on the "username" field. +func UsernameEqualFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldUsername, v)) +} + +// UsernameContainsFold applies the ContainsFold predicate on the "username" field. +func UsernameContainsFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldUsername, v)) +} + +// PasswordEncryptedEQ applies the EQ predicate on the "password_encrypted" field. +func PasswordEncryptedEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldPasswordEncrypted, v)) +} + +// PasswordEncryptedNEQ applies the NEQ predicate on the "password_encrypted" field. +func PasswordEncryptedNEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldPasswordEncrypted, v)) +} + +// PasswordEncryptedIn applies the In predicate on the "password_encrypted" field. +func PasswordEncryptedIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldPasswordEncrypted, vs...)) +} + +// PasswordEncryptedNotIn applies the NotIn predicate on the "password_encrypted" field. 
+func PasswordEncryptedNotIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldPasswordEncrypted, vs...)) +} + +// PasswordEncryptedGT applies the GT predicate on the "password_encrypted" field. +func PasswordEncryptedGT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldPasswordEncrypted, v)) +} + +// PasswordEncryptedGTE applies the GTE predicate on the "password_encrypted" field. +func PasswordEncryptedGTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldPasswordEncrypted, v)) +} + +// PasswordEncryptedLT applies the LT predicate on the "password_encrypted" field. +func PasswordEncryptedLT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldPasswordEncrypted, v)) +} + +// PasswordEncryptedLTE applies the LTE predicate on the "password_encrypted" field. +func PasswordEncryptedLTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldPasswordEncrypted, v)) +} + +// PasswordEncryptedContains applies the Contains predicate on the "password_encrypted" field. +func PasswordEncryptedContains(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContains(FieldPasswordEncrypted, v)) +} + +// PasswordEncryptedHasPrefix applies the HasPrefix predicate on the "password_encrypted" field. +func PasswordEncryptedHasPrefix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldPasswordEncrypted, v)) +} + +// PasswordEncryptedHasSuffix applies the HasSuffix predicate on the "password_encrypted" field. +func PasswordEncryptedHasSuffix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldPasswordEncrypted, v)) +} + +// PasswordEncryptedIsNil applies the IsNil predicate on the "password_encrypted" field. 
+func PasswordEncryptedIsNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIsNull(FieldPasswordEncrypted)) +} + +// PasswordEncryptedNotNil applies the NotNil predicate on the "password_encrypted" field. +func PasswordEncryptedNotNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotNull(FieldPasswordEncrypted)) +} + +// PasswordEncryptedEqualFold applies the EqualFold predicate on the "password_encrypted" field. +func PasswordEncryptedEqualFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldPasswordEncrypted, v)) +} + +// PasswordEncryptedContainsFold applies the ContainsFold predicate on the "password_encrypted" field. +func PasswordEncryptedContainsFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldPasswordEncrypted, v)) +} + +// DatabaseEQ applies the EQ predicate on the "database" field. +func DatabaseEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldDatabase, v)) +} + +// DatabaseNEQ applies the NEQ predicate on the "database" field. +func DatabaseNEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldDatabase, v)) +} + +// DatabaseIn applies the In predicate on the "database" field. +func DatabaseIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldDatabase, vs...)) +} + +// DatabaseNotIn applies the NotIn predicate on the "database" field. +func DatabaseNotIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldDatabase, vs...)) +} + +// DatabaseGT applies the GT predicate on the "database" field. 
+func DatabaseGT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldDatabase, v)) +} + +// DatabaseGTE applies the GTE predicate on the "database" field. +func DatabaseGTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldDatabase, v)) +} + +// DatabaseLT applies the LT predicate on the "database" field. +func DatabaseLT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldDatabase, v)) +} + +// DatabaseLTE applies the LTE predicate on the "database" field. +func DatabaseLTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldDatabase, v)) +} + +// DatabaseContains applies the Contains predicate on the "database" field. +func DatabaseContains(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContains(FieldDatabase, v)) +} + +// DatabaseHasPrefix applies the HasPrefix predicate on the "database" field. +func DatabaseHasPrefix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldDatabase, v)) +} + +// DatabaseHasSuffix applies the HasSuffix predicate on the "database" field. +func DatabaseHasSuffix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldDatabase, v)) +} + +// DatabaseIsNil applies the IsNil predicate on the "database" field. +func DatabaseIsNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIsNull(FieldDatabase)) +} + +// DatabaseNotNil applies the NotNil predicate on the "database" field. +func DatabaseNotNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotNull(FieldDatabase)) +} + +// DatabaseEqualFold applies the EqualFold predicate on the "database" field. 
+func DatabaseEqualFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldDatabase, v)) +} + +// DatabaseContainsFold applies the ContainsFold predicate on the "database" field. +func DatabaseContainsFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldDatabase, v)) +} + +// SslModeEQ applies the EQ predicate on the "ssl_mode" field. +func SslModeEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldSslMode, v)) +} + +// SslModeNEQ applies the NEQ predicate on the "ssl_mode" field. +func SslModeNEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldSslMode, v)) +} + +// SslModeIn applies the In predicate on the "ssl_mode" field. +func SslModeIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldSslMode, vs...)) +} + +// SslModeNotIn applies the NotIn predicate on the "ssl_mode" field. +func SslModeNotIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldSslMode, vs...)) +} + +// SslModeGT applies the GT predicate on the "ssl_mode" field. +func SslModeGT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldSslMode, v)) +} + +// SslModeGTE applies the GTE predicate on the "ssl_mode" field. +func SslModeGTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldSslMode, v)) +} + +// SslModeLT applies the LT predicate on the "ssl_mode" field. +func SslModeLT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldSslMode, v)) +} + +// SslModeLTE applies the LTE predicate on the "ssl_mode" field. 
+func SslModeLTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldSslMode, v)) +} + +// SslModeContains applies the Contains predicate on the "ssl_mode" field. +func SslModeContains(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContains(FieldSslMode, v)) +} + +// SslModeHasPrefix applies the HasPrefix predicate on the "ssl_mode" field. +func SslModeHasPrefix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldSslMode, v)) +} + +// SslModeHasSuffix applies the HasSuffix predicate on the "ssl_mode" field. +func SslModeHasSuffix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldSslMode, v)) +} + +// SslModeIsNil applies the IsNil predicate on the "ssl_mode" field. +func SslModeIsNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIsNull(FieldSslMode)) +} + +// SslModeNotNil applies the NotNil predicate on the "ssl_mode" field. +func SslModeNotNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotNull(FieldSslMode)) +} + +// SslModeEqualFold applies the EqualFold predicate on the "ssl_mode" field. +func SslModeEqualFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldSslMode, v)) +} + +// SslModeContainsFold applies the ContainsFold predicate on the "ssl_mode" field. +func SslModeContainsFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldSslMode, v)) +} + +// AddrEQ applies the EQ predicate on the "addr" field. +func AddrEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldAddr, v)) +} + +// AddrNEQ applies the NEQ predicate on the "addr" field. 
+func AddrNEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldAddr, v)) +} + +// AddrIn applies the In predicate on the "addr" field. +func AddrIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldAddr, vs...)) +} + +// AddrNotIn applies the NotIn predicate on the "addr" field. +func AddrNotIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldAddr, vs...)) +} + +// AddrGT applies the GT predicate on the "addr" field. +func AddrGT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldAddr, v)) +} + +// AddrGTE applies the GTE predicate on the "addr" field. +func AddrGTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldAddr, v)) +} + +// AddrLT applies the LT predicate on the "addr" field. +func AddrLT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldAddr, v)) +} + +// AddrLTE applies the LTE predicate on the "addr" field. +func AddrLTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldAddr, v)) +} + +// AddrContains applies the Contains predicate on the "addr" field. +func AddrContains(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContains(FieldAddr, v)) +} + +// AddrHasPrefix applies the HasPrefix predicate on the "addr" field. +func AddrHasPrefix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldAddr, v)) +} + +// AddrHasSuffix applies the HasSuffix predicate on the "addr" field. +func AddrHasSuffix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldAddr, v)) +} + +// AddrIsNil applies the IsNil predicate on the "addr" field. 
+func AddrIsNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIsNull(FieldAddr)) +} + +// AddrNotNil applies the NotNil predicate on the "addr" field. +func AddrNotNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotNull(FieldAddr)) +} + +// AddrEqualFold applies the EqualFold predicate on the "addr" field. +func AddrEqualFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldAddr, v)) +} + +// AddrContainsFold applies the ContainsFold predicate on the "addr" field. +func AddrContainsFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldAddr, v)) +} + +// RedisDbEQ applies the EQ predicate on the "redis_db" field. +func RedisDbEQ(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldRedisDb, v)) +} + +// RedisDbNEQ applies the NEQ predicate on the "redis_db" field. +func RedisDbNEQ(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldRedisDb, v)) +} + +// RedisDbIn applies the In predicate on the "redis_db" field. +func RedisDbIn(vs ...int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldRedisDb, vs...)) +} + +// RedisDbNotIn applies the NotIn predicate on the "redis_db" field. +func RedisDbNotIn(vs ...int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldRedisDb, vs...)) +} + +// RedisDbGT applies the GT predicate on the "redis_db" field. +func RedisDbGT(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldRedisDb, v)) +} + +// RedisDbGTE applies the GTE predicate on the "redis_db" field. +func RedisDbGTE(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldRedisDb, v)) +} + +// RedisDbLT applies the LT predicate on the "redis_db" field. 
+func RedisDbLT(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldRedisDb, v)) +} + +// RedisDbLTE applies the LTE predicate on the "redis_db" field. +func RedisDbLTE(v int) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldRedisDb, v)) +} + +// RedisDbIsNil applies the IsNil predicate on the "redis_db" field. +func RedisDbIsNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIsNull(FieldRedisDb)) +} + +// RedisDbNotNil applies the NotNil predicate on the "redis_db" field. +func RedisDbNotNil() predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotNull(FieldRedisDb)) +} + +// ContainerNameEQ applies the EQ predicate on the "container_name" field. +func ContainerNameEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldContainerName, v)) +} + +// ContainerNameNEQ applies the NEQ predicate on the "container_name" field. +func ContainerNameNEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldContainerName, v)) +} + +// ContainerNameIn applies the In predicate on the "container_name" field. +func ContainerNameIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldContainerName, vs...)) +} + +// ContainerNameNotIn applies the NotIn predicate on the "container_name" field. +func ContainerNameNotIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldContainerName, vs...)) +} + +// ContainerNameGT applies the GT predicate on the "container_name" field. +func ContainerNameGT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldContainerName, v)) +} + +// ContainerNameGTE applies the GTE predicate on the "container_name" field. 
+func ContainerNameGTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldContainerName, v)) +} + +// ContainerNameLT applies the LT predicate on the "container_name" field. +func ContainerNameLT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldContainerName, v)) +} + +// ContainerNameLTE applies the LTE predicate on the "container_name" field. +func ContainerNameLTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldContainerName, v)) +} + +// ContainerNameContains applies the Contains predicate on the "container_name" field. +func ContainerNameContains(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContains(FieldContainerName, v)) +} + +// ContainerNameHasPrefix applies the HasPrefix predicate on the "container_name" field. +func ContainerNameHasPrefix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldContainerName, v)) +} + +// ContainerNameHasSuffix applies the HasSuffix predicate on the "container_name" field. +func ContainerNameHasSuffix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldContainerName, v)) +} + +// ContainerNameEqualFold applies the EqualFold predicate on the "container_name" field. +func ContainerNameEqualFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldContainerName, v)) +} + +// ContainerNameContainsFold applies the ContainsFold predicate on the "container_name" field. +func ContainerNameContainsFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldContainerName, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. 
+func CreatedAtEQ(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
+func UpdatedAtNEQ(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.BackupSourceConfig) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.BackupSourceConfig) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.BackupSourceConfig) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.NotPredicates(p)) +} diff --git a/backup/ent/backupsourceconfig_create.go b/backup/ent/backupsourceconfig_create.go new file mode 100644 index 000000000..a514f5ce3 --- /dev/null +++ b/backup/ent/backupsourceconfig_create.go @@ -0,0 +1,414 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" +) + +// BackupSourceConfigCreate is the builder for creating a BackupSourceConfig entity. +type BackupSourceConfigCreate struct { + config + mutation *BackupSourceConfigMutation + hooks []Hook +} + +// SetSourceType sets the "source_type" field. +func (_c *BackupSourceConfigCreate) SetSourceType(v backupsourceconfig.SourceType) *BackupSourceConfigCreate { + _c.mutation.SetSourceType(v) + return _c +} + +// SetHost sets the "host" field. +func (_c *BackupSourceConfigCreate) SetHost(v string) *BackupSourceConfigCreate { + _c.mutation.SetHost(v) + return _c +} + +// SetNillableHost sets the "host" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillableHost(v *string) *BackupSourceConfigCreate { + if v != nil { + _c.SetHost(*v) + } + return _c +} + +// SetPort sets the "port" field. +func (_c *BackupSourceConfigCreate) SetPort(v int) *BackupSourceConfigCreate { + _c.mutation.SetPort(v) + return _c +} + +// SetNillablePort sets the "port" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillablePort(v *int) *BackupSourceConfigCreate { + if v != nil { + _c.SetPort(*v) + } + return _c +} + +// SetUsername sets the "username" field. 
+func (_c *BackupSourceConfigCreate) SetUsername(v string) *BackupSourceConfigCreate { + _c.mutation.SetUsername(v) + return _c +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillableUsername(v *string) *BackupSourceConfigCreate { + if v != nil { + _c.SetUsername(*v) + } + return _c +} + +// SetPasswordEncrypted sets the "password_encrypted" field. +func (_c *BackupSourceConfigCreate) SetPasswordEncrypted(v string) *BackupSourceConfigCreate { + _c.mutation.SetPasswordEncrypted(v) + return _c +} + +// SetNillablePasswordEncrypted sets the "password_encrypted" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillablePasswordEncrypted(v *string) *BackupSourceConfigCreate { + if v != nil { + _c.SetPasswordEncrypted(*v) + } + return _c +} + +// SetDatabase sets the "database" field. +func (_c *BackupSourceConfigCreate) SetDatabase(v string) *BackupSourceConfigCreate { + _c.mutation.SetDatabase(v) + return _c +} + +// SetNillableDatabase sets the "database" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillableDatabase(v *string) *BackupSourceConfigCreate { + if v != nil { + _c.SetDatabase(*v) + } + return _c +} + +// SetSslMode sets the "ssl_mode" field. +func (_c *BackupSourceConfigCreate) SetSslMode(v string) *BackupSourceConfigCreate { + _c.mutation.SetSslMode(v) + return _c +} + +// SetNillableSslMode sets the "ssl_mode" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillableSslMode(v *string) *BackupSourceConfigCreate { + if v != nil { + _c.SetSslMode(*v) + } + return _c +} + +// SetAddr sets the "addr" field. +func (_c *BackupSourceConfigCreate) SetAddr(v string) *BackupSourceConfigCreate { + _c.mutation.SetAddr(v) + return _c +} + +// SetNillableAddr sets the "addr" field if the given value is not nil. 
+func (_c *BackupSourceConfigCreate) SetNillableAddr(v *string) *BackupSourceConfigCreate { + if v != nil { + _c.SetAddr(*v) + } + return _c +} + +// SetRedisDb sets the "redis_db" field. +func (_c *BackupSourceConfigCreate) SetRedisDb(v int) *BackupSourceConfigCreate { + _c.mutation.SetRedisDb(v) + return _c +} + +// SetNillableRedisDb sets the "redis_db" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillableRedisDb(v *int) *BackupSourceConfigCreate { + if v != nil { + _c.SetRedisDb(*v) + } + return _c +} + +// SetContainerName sets the "container_name" field. +func (_c *BackupSourceConfigCreate) SetContainerName(v string) *BackupSourceConfigCreate { + _c.mutation.SetContainerName(v) + return _c +} + +// SetNillableContainerName sets the "container_name" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillableContainerName(v *string) *BackupSourceConfigCreate { + if v != nil { + _c.SetContainerName(*v) + } + return _c +} + +// SetCreatedAt sets the "created_at" field. +func (_c *BackupSourceConfigCreate) SetCreatedAt(v time.Time) *BackupSourceConfigCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillableCreatedAt(v *time.Time) *BackupSourceConfigCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *BackupSourceConfigCreate) SetUpdatedAt(v time.Time) *BackupSourceConfigCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillableUpdatedAt(v *time.Time) *BackupSourceConfigCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// Mutation returns the BackupSourceConfigMutation object of the builder. 
+func (_c *BackupSourceConfigCreate) Mutation() *BackupSourceConfigMutation { + return _c.mutation +} + +// Save creates the BackupSourceConfig in the database. +func (_c *BackupSourceConfigCreate) Save(ctx context.Context) (*BackupSourceConfig, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *BackupSourceConfigCreate) SaveX(ctx context.Context) *BackupSourceConfig { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *BackupSourceConfigCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *BackupSourceConfigCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *BackupSourceConfigCreate) defaults() { + if _, ok := _c.mutation.ContainerName(); !ok { + v := backupsourceconfig.DefaultContainerName + _c.mutation.SetContainerName(v) + } + if _, ok := _c.mutation.CreatedAt(); !ok { + v := backupsourceconfig.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := backupsourceconfig.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *BackupSourceConfigCreate) check() error { + if _, ok := _c.mutation.SourceType(); !ok { + return &ValidationError{Name: "source_type", err: errors.New(`ent: missing required field "BackupSourceConfig.source_type"`)} + } + if v, ok := _c.mutation.SourceType(); ok { + if err := backupsourceconfig.SourceTypeValidator(v); err != nil { + return &ValidationError{Name: "source_type", err: fmt.Errorf(`ent: validator failed for field "BackupSourceConfig.source_type": %w`, err)} + } + } + if _, ok := _c.mutation.ContainerName(); !ok { + return &ValidationError{Name: "container_name", err: errors.New(`ent: missing required field "BackupSourceConfig.container_name"`)} + } + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "BackupSourceConfig.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "BackupSourceConfig.updated_at"`)} + } + return nil +} + +func (_c *BackupSourceConfigCreate) sqlSave(ctx context.Context) (*BackupSourceConfig, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *BackupSourceConfigCreate) createSpec() (*BackupSourceConfig, *sqlgraph.CreateSpec) { + var ( + _node = &BackupSourceConfig{config: _c.config} + _spec = sqlgraph.NewCreateSpec(backupsourceconfig.Table, sqlgraph.NewFieldSpec(backupsourceconfig.FieldID, field.TypeInt)) + ) + if value, ok := _c.mutation.SourceType(); ok { + _spec.SetField(backupsourceconfig.FieldSourceType, field.TypeEnum, value) + _node.SourceType = value + } + 
if value, ok := _c.mutation.Host(); ok { + _spec.SetField(backupsourceconfig.FieldHost, field.TypeString, value) + _node.Host = value + } + if value, ok := _c.mutation.Port(); ok { + _spec.SetField(backupsourceconfig.FieldPort, field.TypeInt, value) + _node.Port = &value + } + if value, ok := _c.mutation.Username(); ok { + _spec.SetField(backupsourceconfig.FieldUsername, field.TypeString, value) + _node.Username = value + } + if value, ok := _c.mutation.PasswordEncrypted(); ok { + _spec.SetField(backupsourceconfig.FieldPasswordEncrypted, field.TypeString, value) + _node.PasswordEncrypted = value + } + if value, ok := _c.mutation.Database(); ok { + _spec.SetField(backupsourceconfig.FieldDatabase, field.TypeString, value) + _node.Database = value + } + if value, ok := _c.mutation.SslMode(); ok { + _spec.SetField(backupsourceconfig.FieldSslMode, field.TypeString, value) + _node.SslMode = value + } + if value, ok := _c.mutation.Addr(); ok { + _spec.SetField(backupsourceconfig.FieldAddr, field.TypeString, value) + _node.Addr = value + } + if value, ok := _c.mutation.RedisDb(); ok { + _spec.SetField(backupsourceconfig.FieldRedisDb, field.TypeInt, value) + _node.RedisDb = &value + } + if value, ok := _c.mutation.ContainerName(); ok { + _spec.SetField(backupsourceconfig.FieldContainerName, field.TypeString, value) + _node.ContainerName = value + } + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(backupsourceconfig.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(backupsourceconfig.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + return _node, _spec +} + +// BackupSourceConfigCreateBulk is the builder for creating many BackupSourceConfig entities in bulk. +type BackupSourceConfigCreateBulk struct { + config + err error + builders []*BackupSourceConfigCreate +} + +// Save creates the BackupSourceConfig entities in the database. 
+func (_c *BackupSourceConfigCreateBulk) Save(ctx context.Context) ([]*BackupSourceConfig, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*BackupSourceConfig, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*BackupSourceConfigMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *BackupSourceConfigCreateBulk) SaveX(ctx context.Context) []*BackupSourceConfig { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (_c *BackupSourceConfigCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *BackupSourceConfigCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backup/ent/backupsourceconfig_delete.go b/backup/ent/backupsourceconfig_delete.go new file mode 100644 index 000000000..0d6d36f92 --- /dev/null +++ b/backup/ent/backupsourceconfig_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupSourceConfigDelete is the builder for deleting a BackupSourceConfig entity. +type BackupSourceConfigDelete struct { + config + hooks []Hook + mutation *BackupSourceConfigMutation +} + +// Where appends a list predicates to the BackupSourceConfigDelete builder. +func (_d *BackupSourceConfigDelete) Where(ps ...predicate.BackupSourceConfig) *BackupSourceConfigDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *BackupSourceConfigDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *BackupSourceConfigDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *BackupSourceConfigDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(backupsourceconfig.Table, sqlgraph.NewFieldSpec(backupsourceconfig.FieldID, field.TypeInt)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// BackupSourceConfigDeleteOne is the builder for deleting a single BackupSourceConfig entity. +type BackupSourceConfigDeleteOne struct { + _d *BackupSourceConfigDelete +} + +// Where appends a list predicates to the BackupSourceConfigDelete builder. +func (_d *BackupSourceConfigDeleteOne) Where(ps ...predicate.BackupSourceConfig) *BackupSourceConfigDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *BackupSourceConfigDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{backupsourceconfig.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *BackupSourceConfigDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backup/ent/backupsourceconfig_query.go b/backup/ent/backupsourceconfig_query.go new file mode 100644 index 000000000..3400a3fcd --- /dev/null +++ b/backup/ent/backupsourceconfig_query.go @@ -0,0 +1,527 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupSourceConfigQuery is the builder for querying BackupSourceConfig entities. +type BackupSourceConfigQuery struct { + config + ctx *QueryContext + order []backupsourceconfig.OrderOption + inters []Interceptor + predicates []predicate.BackupSourceConfig + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the BackupSourceConfigQuery builder. +func (_q *BackupSourceConfigQuery) Where(ps ...predicate.BackupSourceConfig) *BackupSourceConfigQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *BackupSourceConfigQuery) Limit(limit int) *BackupSourceConfigQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *BackupSourceConfigQuery) Offset(offset int) *BackupSourceConfigQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *BackupSourceConfigQuery) Unique(unique bool) *BackupSourceConfigQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *BackupSourceConfigQuery) Order(o ...backupsourceconfig.OrderOption) *BackupSourceConfigQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first BackupSourceConfig entity from the query. +// Returns a *NotFoundError when no BackupSourceConfig was found. 
+func (_q *BackupSourceConfigQuery) First(ctx context.Context) (*BackupSourceConfig, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{backupsourceconfig.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *BackupSourceConfigQuery) FirstX(ctx context.Context) *BackupSourceConfig { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first BackupSourceConfig ID from the query. +// Returns a *NotFoundError when no BackupSourceConfig ID was found. +func (_q *BackupSourceConfigQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{backupsourceconfig.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *BackupSourceConfigQuery) FirstIDX(ctx context.Context) int { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single BackupSourceConfig entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one BackupSourceConfig entity is found. +// Returns a *NotFoundError when no BackupSourceConfig entities are found. +func (_q *BackupSourceConfigQuery) Only(ctx context.Context) (*BackupSourceConfig, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{backupsourceconfig.Label} + default: + return nil, &NotSingularError{backupsourceconfig.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
+func (_q *BackupSourceConfigQuery) OnlyX(ctx context.Context) *BackupSourceConfig { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only BackupSourceConfig ID in the query. +// Returns a *NotSingularError when more than one BackupSourceConfig ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *BackupSourceConfigQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{backupsourceconfig.Label} + default: + err = &NotSingularError{backupsourceconfig.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *BackupSourceConfigQuery) OnlyIDX(ctx context.Context) int { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of BackupSourceConfigs. +func (_q *BackupSourceConfigQuery) All(ctx context.Context) ([]*BackupSourceConfig, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*BackupSourceConfig, *BackupSourceConfigQuery]() + return withInterceptors[[]*BackupSourceConfig](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *BackupSourceConfigQuery) AllX(ctx context.Context) []*BackupSourceConfig { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of BackupSourceConfig IDs. 
+func (_q *BackupSourceConfigQuery) IDs(ctx context.Context) (ids []int, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(backupsourceconfig.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *BackupSourceConfigQuery) IDsX(ctx context.Context) []int { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *BackupSourceConfigQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*BackupSourceConfigQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *BackupSourceConfigQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *BackupSourceConfigQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *BackupSourceConfigQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the BackupSourceConfigQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
+func (_q *BackupSourceConfigQuery) Clone() *BackupSourceConfigQuery { + if _q == nil { + return nil + } + return &BackupSourceConfigQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]backupsourceconfig.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.BackupSourceConfig{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// SourceType backupsourceconfig.SourceType `json:"source_type,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.BackupSourceConfig.Query(). +// GroupBy(backupsourceconfig.FieldSourceType). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *BackupSourceConfigQuery) GroupBy(field string, fields ...string) *BackupSourceConfigGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &BackupSourceConfigGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = backupsourceconfig.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// SourceType backupsourceconfig.SourceType `json:"source_type,omitempty"` +// } +// +// client.BackupSourceConfig.Query(). +// Select(backupsourceconfig.FieldSourceType). +// Scan(ctx, &v) +func (_q *BackupSourceConfigQuery) Select(fields ...string) *BackupSourceConfigSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) 
+ sbuild := &BackupSourceConfigSelect{BackupSourceConfigQuery: _q} + sbuild.label = backupsourceconfig.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a BackupSourceConfigSelect configured with the given aggregations. +func (_q *BackupSourceConfigQuery) Aggregate(fns ...AggregateFunc) *BackupSourceConfigSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *BackupSourceConfigQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !backupsourceconfig.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *BackupSourceConfigQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BackupSourceConfig, error) { + var ( + nodes = []*BackupSourceConfig{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*BackupSourceConfig).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &BackupSourceConfig{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *BackupSourceConfigQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } 
+ return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *BackupSourceConfigQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(backupsourceconfig.Table, backupsourceconfig.Columns, sqlgraph.NewFieldSpec(backupsourceconfig.FieldID, field.TypeInt)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, backupsourceconfig.FieldID) + for i := range fields { + if fields[i] != backupsourceconfig.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *BackupSourceConfigQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(backupsourceconfig.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = backupsourceconfig.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// BackupSourceConfigGroupBy is the group-by builder for BackupSourceConfig entities. +type BackupSourceConfigGroupBy struct { + selector + build *BackupSourceConfigQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *BackupSourceConfigGroupBy) Aggregate(fns ...AggregateFunc) *BackupSourceConfigGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. +func (_g *BackupSourceConfigGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*BackupSourceConfigQuery, *BackupSourceConfigGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *BackupSourceConfigGroupBy) sqlScan(ctx context.Context, root *BackupSourceConfigQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// BackupSourceConfigSelect is the builder for selecting fields of BackupSourceConfig entities. 
+type BackupSourceConfigSelect struct { + *BackupSourceConfigQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *BackupSourceConfigSelect) Aggregate(fns ...AggregateFunc) *BackupSourceConfigSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. +func (_s *BackupSourceConfigSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*BackupSourceConfigQuery, *BackupSourceConfigSelect](ctx, _s.BackupSourceConfigQuery, _s, _s.inters, v) +} + +func (_s *BackupSourceConfigSelect) sqlScan(ctx context.Context, root *BackupSourceConfigQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backup/ent/backupsourceconfig_update.go b/backup/ent/backupsourceconfig_update.go new file mode 100644 index 000000000..d841b02d5 --- /dev/null +++ b/backup/ent/backupsourceconfig_update.go @@ -0,0 +1,762 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +// BackupSourceConfigUpdate is the builder for updating BackupSourceConfig entities. +type BackupSourceConfigUpdate struct { + config + hooks []Hook + mutation *BackupSourceConfigMutation +} + +// Where appends a list predicates to the BackupSourceConfigUpdate builder. +func (_u *BackupSourceConfigUpdate) Where(ps ...predicate.BackupSourceConfig) *BackupSourceConfigUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetSourceType sets the "source_type" field. +func (_u *BackupSourceConfigUpdate) SetSourceType(v backupsourceconfig.SourceType) *BackupSourceConfigUpdate { + _u.mutation.SetSourceType(v) + return _u +} + +// SetNillableSourceType sets the "source_type" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillableSourceType(v *backupsourceconfig.SourceType) *BackupSourceConfigUpdate { + if v != nil { + _u.SetSourceType(*v) + } + return _u +} + +// SetHost sets the "host" field. +func (_u *BackupSourceConfigUpdate) SetHost(v string) *BackupSourceConfigUpdate { + _u.mutation.SetHost(v) + return _u +} + +// SetNillableHost sets the "host" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillableHost(v *string) *BackupSourceConfigUpdate { + if v != nil { + _u.SetHost(*v) + } + return _u +} + +// ClearHost clears the value of the "host" field. +func (_u *BackupSourceConfigUpdate) ClearHost() *BackupSourceConfigUpdate { + _u.mutation.ClearHost() + return _u +} + +// SetPort sets the "port" field. +func (_u *BackupSourceConfigUpdate) SetPort(v int) *BackupSourceConfigUpdate { + _u.mutation.ResetPort() + _u.mutation.SetPort(v) + return _u +} + +// SetNillablePort sets the "port" field if the given value is not nil. 
+func (_u *BackupSourceConfigUpdate) SetNillablePort(v *int) *BackupSourceConfigUpdate { + if v != nil { + _u.SetPort(*v) + } + return _u +} + +// AddPort adds value to the "port" field. +func (_u *BackupSourceConfigUpdate) AddPort(v int) *BackupSourceConfigUpdate { + _u.mutation.AddPort(v) + return _u +} + +// ClearPort clears the value of the "port" field. +func (_u *BackupSourceConfigUpdate) ClearPort() *BackupSourceConfigUpdate { + _u.mutation.ClearPort() + return _u +} + +// SetUsername sets the "username" field. +func (_u *BackupSourceConfigUpdate) SetUsername(v string) *BackupSourceConfigUpdate { + _u.mutation.SetUsername(v) + return _u +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillableUsername(v *string) *BackupSourceConfigUpdate { + if v != nil { + _u.SetUsername(*v) + } + return _u +} + +// ClearUsername clears the value of the "username" field. +func (_u *BackupSourceConfigUpdate) ClearUsername() *BackupSourceConfigUpdate { + _u.mutation.ClearUsername() + return _u +} + +// SetPasswordEncrypted sets the "password_encrypted" field. +func (_u *BackupSourceConfigUpdate) SetPasswordEncrypted(v string) *BackupSourceConfigUpdate { + _u.mutation.SetPasswordEncrypted(v) + return _u +} + +// SetNillablePasswordEncrypted sets the "password_encrypted" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillablePasswordEncrypted(v *string) *BackupSourceConfigUpdate { + if v != nil { + _u.SetPasswordEncrypted(*v) + } + return _u +} + +// ClearPasswordEncrypted clears the value of the "password_encrypted" field. +func (_u *BackupSourceConfigUpdate) ClearPasswordEncrypted() *BackupSourceConfigUpdate { + _u.mutation.ClearPasswordEncrypted() + return _u +} + +// SetDatabase sets the "database" field. 
+func (_u *BackupSourceConfigUpdate) SetDatabase(v string) *BackupSourceConfigUpdate { + _u.mutation.SetDatabase(v) + return _u +} + +// SetNillableDatabase sets the "database" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillableDatabase(v *string) *BackupSourceConfigUpdate { + if v != nil { + _u.SetDatabase(*v) + } + return _u +} + +// ClearDatabase clears the value of the "database" field. +func (_u *BackupSourceConfigUpdate) ClearDatabase() *BackupSourceConfigUpdate { + _u.mutation.ClearDatabase() + return _u +} + +// SetSslMode sets the "ssl_mode" field. +func (_u *BackupSourceConfigUpdate) SetSslMode(v string) *BackupSourceConfigUpdate { + _u.mutation.SetSslMode(v) + return _u +} + +// SetNillableSslMode sets the "ssl_mode" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillableSslMode(v *string) *BackupSourceConfigUpdate { + if v != nil { + _u.SetSslMode(*v) + } + return _u +} + +// ClearSslMode clears the value of the "ssl_mode" field. +func (_u *BackupSourceConfigUpdate) ClearSslMode() *BackupSourceConfigUpdate { + _u.mutation.ClearSslMode() + return _u +} + +// SetAddr sets the "addr" field. +func (_u *BackupSourceConfigUpdate) SetAddr(v string) *BackupSourceConfigUpdate { + _u.mutation.SetAddr(v) + return _u +} + +// SetNillableAddr sets the "addr" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillableAddr(v *string) *BackupSourceConfigUpdate { + if v != nil { + _u.SetAddr(*v) + } + return _u +} + +// ClearAddr clears the value of the "addr" field. +func (_u *BackupSourceConfigUpdate) ClearAddr() *BackupSourceConfigUpdate { + _u.mutation.ClearAddr() + return _u +} + +// SetRedisDb sets the "redis_db" field. +func (_u *BackupSourceConfigUpdate) SetRedisDb(v int) *BackupSourceConfigUpdate { + _u.mutation.ResetRedisDb() + _u.mutation.SetRedisDb(v) + return _u +} + +// SetNillableRedisDb sets the "redis_db" field if the given value is not nil. 
+func (_u *BackupSourceConfigUpdate) SetNillableRedisDb(v *int) *BackupSourceConfigUpdate { + if v != nil { + _u.SetRedisDb(*v) + } + return _u +} + +// AddRedisDb adds value to the "redis_db" field. +func (_u *BackupSourceConfigUpdate) AddRedisDb(v int) *BackupSourceConfigUpdate { + _u.mutation.AddRedisDb(v) + return _u +} + +// ClearRedisDb clears the value of the "redis_db" field. +func (_u *BackupSourceConfigUpdate) ClearRedisDb() *BackupSourceConfigUpdate { + _u.mutation.ClearRedisDb() + return _u +} + +// SetContainerName sets the "container_name" field. +func (_u *BackupSourceConfigUpdate) SetContainerName(v string) *BackupSourceConfigUpdate { + _u.mutation.SetContainerName(v) + return _u +} + +// SetNillableContainerName sets the "container_name" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillableContainerName(v *string) *BackupSourceConfigUpdate { + if v != nil { + _u.SetContainerName(*v) + } + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *BackupSourceConfigUpdate) SetUpdatedAt(v time.Time) *BackupSourceConfigUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// Mutation returns the BackupSourceConfigMutation object of the builder. +func (_u *BackupSourceConfigUpdate) Mutation() *BackupSourceConfigMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *BackupSourceConfigUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *BackupSourceConfigUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (_u *BackupSourceConfigUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_u *BackupSourceConfigUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *BackupSourceConfigUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := backupsourceconfig.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *BackupSourceConfigUpdate) check() error { + if v, ok := _u.mutation.SourceType(); ok { + if err := backupsourceconfig.SourceTypeValidator(v); err != nil { + return &ValidationError{Name: "source_type", err: fmt.Errorf(`ent: validator failed for field "BackupSourceConfig.source_type": %w`, err)} + } + } + return nil +} + +func (_u *BackupSourceConfigUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(backupsourceconfig.Table, backupsourceconfig.Columns, sqlgraph.NewFieldSpec(backupsourceconfig.FieldID, field.TypeInt)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.SourceType(); ok { + _spec.SetField(backupsourceconfig.FieldSourceType, field.TypeEnum, value) + } + if value, ok := _u.mutation.Host(); ok { + _spec.SetField(backupsourceconfig.FieldHost, field.TypeString, value) + } + if _u.mutation.HostCleared() { + _spec.ClearField(backupsourceconfig.FieldHost, field.TypeString) + } + if value, ok := _u.mutation.Port(); ok { + _spec.SetField(backupsourceconfig.FieldPort, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPort(); ok { + _spec.AddField(backupsourceconfig.FieldPort, field.TypeInt, value) + } + if _u.mutation.PortCleared() { + _spec.ClearField(backupsourceconfig.FieldPort, field.TypeInt) + } + if value, ok := _u.mutation.Username(); ok { + 
_spec.SetField(backupsourceconfig.FieldUsername, field.TypeString, value) + } + if _u.mutation.UsernameCleared() { + _spec.ClearField(backupsourceconfig.FieldUsername, field.TypeString) + } + if value, ok := _u.mutation.PasswordEncrypted(); ok { + _spec.SetField(backupsourceconfig.FieldPasswordEncrypted, field.TypeString, value) + } + if _u.mutation.PasswordEncryptedCleared() { + _spec.ClearField(backupsourceconfig.FieldPasswordEncrypted, field.TypeString) + } + if value, ok := _u.mutation.Database(); ok { + _spec.SetField(backupsourceconfig.FieldDatabase, field.TypeString, value) + } + if _u.mutation.DatabaseCleared() { + _spec.ClearField(backupsourceconfig.FieldDatabase, field.TypeString) + } + if value, ok := _u.mutation.SslMode(); ok { + _spec.SetField(backupsourceconfig.FieldSslMode, field.TypeString, value) + } + if _u.mutation.SslModeCleared() { + _spec.ClearField(backupsourceconfig.FieldSslMode, field.TypeString) + } + if value, ok := _u.mutation.Addr(); ok { + _spec.SetField(backupsourceconfig.FieldAddr, field.TypeString, value) + } + if _u.mutation.AddrCleared() { + _spec.ClearField(backupsourceconfig.FieldAddr, field.TypeString) + } + if value, ok := _u.mutation.RedisDb(); ok { + _spec.SetField(backupsourceconfig.FieldRedisDb, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedRedisDb(); ok { + _spec.AddField(backupsourceconfig.FieldRedisDb, field.TypeInt, value) + } + if _u.mutation.RedisDbCleared() { + _spec.ClearField(backupsourceconfig.FieldRedisDb, field.TypeInt) + } + if value, ok := _u.mutation.ContainerName(); ok { + _spec.SetField(backupsourceconfig.FieldContainerName, field.TypeString, value) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(backupsourceconfig.FieldUpdatedAt, field.TypeTime, value) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{backupsourceconfig.Label} + } else if 
sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// BackupSourceConfigUpdateOne is the builder for updating a single BackupSourceConfig entity. +type BackupSourceConfigUpdateOne struct { + config + fields []string + hooks []Hook + mutation *BackupSourceConfigMutation +} + +// SetSourceType sets the "source_type" field. +func (_u *BackupSourceConfigUpdateOne) SetSourceType(v backupsourceconfig.SourceType) *BackupSourceConfigUpdateOne { + _u.mutation.SetSourceType(v) + return _u +} + +// SetNillableSourceType sets the "source_type" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillableSourceType(v *backupsourceconfig.SourceType) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetSourceType(*v) + } + return _u +} + +// SetHost sets the "host" field. +func (_u *BackupSourceConfigUpdateOne) SetHost(v string) *BackupSourceConfigUpdateOne { + _u.mutation.SetHost(v) + return _u +} + +// SetNillableHost sets the "host" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillableHost(v *string) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetHost(*v) + } + return _u +} + +// ClearHost clears the value of the "host" field. +func (_u *BackupSourceConfigUpdateOne) ClearHost() *BackupSourceConfigUpdateOne { + _u.mutation.ClearHost() + return _u +} + +// SetPort sets the "port" field. +func (_u *BackupSourceConfigUpdateOne) SetPort(v int) *BackupSourceConfigUpdateOne { + _u.mutation.ResetPort() + _u.mutation.SetPort(v) + return _u +} + +// SetNillablePort sets the "port" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillablePort(v *int) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetPort(*v) + } + return _u +} + +// AddPort adds value to the "port" field. 
+func (_u *BackupSourceConfigUpdateOne) AddPort(v int) *BackupSourceConfigUpdateOne { + _u.mutation.AddPort(v) + return _u +} + +// ClearPort clears the value of the "port" field. +func (_u *BackupSourceConfigUpdateOne) ClearPort() *BackupSourceConfigUpdateOne { + _u.mutation.ClearPort() + return _u +} + +// SetUsername sets the "username" field. +func (_u *BackupSourceConfigUpdateOne) SetUsername(v string) *BackupSourceConfigUpdateOne { + _u.mutation.SetUsername(v) + return _u +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillableUsername(v *string) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetUsername(*v) + } + return _u +} + +// ClearUsername clears the value of the "username" field. +func (_u *BackupSourceConfigUpdateOne) ClearUsername() *BackupSourceConfigUpdateOne { + _u.mutation.ClearUsername() + return _u +} + +// SetPasswordEncrypted sets the "password_encrypted" field. +func (_u *BackupSourceConfigUpdateOne) SetPasswordEncrypted(v string) *BackupSourceConfigUpdateOne { + _u.mutation.SetPasswordEncrypted(v) + return _u +} + +// SetNillablePasswordEncrypted sets the "password_encrypted" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillablePasswordEncrypted(v *string) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetPasswordEncrypted(*v) + } + return _u +} + +// ClearPasswordEncrypted clears the value of the "password_encrypted" field. +func (_u *BackupSourceConfigUpdateOne) ClearPasswordEncrypted() *BackupSourceConfigUpdateOne { + _u.mutation.ClearPasswordEncrypted() + return _u +} + +// SetDatabase sets the "database" field. +func (_u *BackupSourceConfigUpdateOne) SetDatabase(v string) *BackupSourceConfigUpdateOne { + _u.mutation.SetDatabase(v) + return _u +} + +// SetNillableDatabase sets the "database" field if the given value is not nil. 
+func (_u *BackupSourceConfigUpdateOne) SetNillableDatabase(v *string) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetDatabase(*v) + } + return _u +} + +// ClearDatabase clears the value of the "database" field. +func (_u *BackupSourceConfigUpdateOne) ClearDatabase() *BackupSourceConfigUpdateOne { + _u.mutation.ClearDatabase() + return _u +} + +// SetSslMode sets the "ssl_mode" field. +func (_u *BackupSourceConfigUpdateOne) SetSslMode(v string) *BackupSourceConfigUpdateOne { + _u.mutation.SetSslMode(v) + return _u +} + +// SetNillableSslMode sets the "ssl_mode" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillableSslMode(v *string) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetSslMode(*v) + } + return _u +} + +// ClearSslMode clears the value of the "ssl_mode" field. +func (_u *BackupSourceConfigUpdateOne) ClearSslMode() *BackupSourceConfigUpdateOne { + _u.mutation.ClearSslMode() + return _u +} + +// SetAddr sets the "addr" field. +func (_u *BackupSourceConfigUpdateOne) SetAddr(v string) *BackupSourceConfigUpdateOne { + _u.mutation.SetAddr(v) + return _u +} + +// SetNillableAddr sets the "addr" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillableAddr(v *string) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetAddr(*v) + } + return _u +} + +// ClearAddr clears the value of the "addr" field. +func (_u *BackupSourceConfigUpdateOne) ClearAddr() *BackupSourceConfigUpdateOne { + _u.mutation.ClearAddr() + return _u +} + +// SetRedisDb sets the "redis_db" field. +func (_u *BackupSourceConfigUpdateOne) SetRedisDb(v int) *BackupSourceConfigUpdateOne { + _u.mutation.ResetRedisDb() + _u.mutation.SetRedisDb(v) + return _u +} + +// SetNillableRedisDb sets the "redis_db" field if the given value is not nil. 
+func (_u *BackupSourceConfigUpdateOne) SetNillableRedisDb(v *int) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetRedisDb(*v) + } + return _u +} + +// AddRedisDb adds value to the "redis_db" field. +func (_u *BackupSourceConfigUpdateOne) AddRedisDb(v int) *BackupSourceConfigUpdateOne { + _u.mutation.AddRedisDb(v) + return _u +} + +// ClearRedisDb clears the value of the "redis_db" field. +func (_u *BackupSourceConfigUpdateOne) ClearRedisDb() *BackupSourceConfigUpdateOne { + _u.mutation.ClearRedisDb() + return _u +} + +// SetContainerName sets the "container_name" field. +func (_u *BackupSourceConfigUpdateOne) SetContainerName(v string) *BackupSourceConfigUpdateOne { + _u.mutation.SetContainerName(v) + return _u +} + +// SetNillableContainerName sets the "container_name" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillableContainerName(v *string) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetContainerName(*v) + } + return _u +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *BackupSourceConfigUpdateOne) SetUpdatedAt(v time.Time) *BackupSourceConfigUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// Mutation returns the BackupSourceConfigMutation object of the builder. +func (_u *BackupSourceConfigUpdateOne) Mutation() *BackupSourceConfigMutation { + return _u.mutation +} + +// Where appends a list predicates to the BackupSourceConfigUpdate builder. +func (_u *BackupSourceConfigUpdateOne) Where(ps ...predicate.BackupSourceConfig) *BackupSourceConfigUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *BackupSourceConfigUpdateOne) Select(field string, fields ...string) *BackupSourceConfigUpdateOne { + _u.fields = append([]string{field}, fields...) 
+ return _u +} + +// Save executes the query and returns the updated BackupSourceConfig entity. +func (_u *BackupSourceConfigUpdateOne) Save(ctx context.Context) (*BackupSourceConfig, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *BackupSourceConfigUpdateOne) SaveX(ctx context.Context) *BackupSourceConfig { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *BackupSourceConfigUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *BackupSourceConfigUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *BackupSourceConfigUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := backupsourceconfig.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_u *BackupSourceConfigUpdateOne) check() error { + if v, ok := _u.mutation.SourceType(); ok { + if err := backupsourceconfig.SourceTypeValidator(v); err != nil { + return &ValidationError{Name: "source_type", err: fmt.Errorf(`ent: validator failed for field "BackupSourceConfig.source_type": %w`, err)} + } + } + return nil +} + +func (_u *BackupSourceConfigUpdateOne) sqlSave(ctx context.Context) (_node *BackupSourceConfig, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(backupsourceconfig.Table, backupsourceconfig.Columns, sqlgraph.NewFieldSpec(backupsourceconfig.FieldID, field.TypeInt)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BackupSourceConfig.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, backupsourceconfig.FieldID) + for _, f := range fields { + if !backupsourceconfig.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != backupsourceconfig.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.SourceType(); ok { + _spec.SetField(backupsourceconfig.FieldSourceType, field.TypeEnum, value) + } + if value, ok := _u.mutation.Host(); ok { + _spec.SetField(backupsourceconfig.FieldHost, field.TypeString, value) + } + if _u.mutation.HostCleared() { + _spec.ClearField(backupsourceconfig.FieldHost, field.TypeString) + } + if value, ok := _u.mutation.Port(); ok { + _spec.SetField(backupsourceconfig.FieldPort, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedPort(); ok { + 
_spec.AddField(backupsourceconfig.FieldPort, field.TypeInt, value) + } + if _u.mutation.PortCleared() { + _spec.ClearField(backupsourceconfig.FieldPort, field.TypeInt) + } + if value, ok := _u.mutation.Username(); ok { + _spec.SetField(backupsourceconfig.FieldUsername, field.TypeString, value) + } + if _u.mutation.UsernameCleared() { + _spec.ClearField(backupsourceconfig.FieldUsername, field.TypeString) + } + if value, ok := _u.mutation.PasswordEncrypted(); ok { + _spec.SetField(backupsourceconfig.FieldPasswordEncrypted, field.TypeString, value) + } + if _u.mutation.PasswordEncryptedCleared() { + _spec.ClearField(backupsourceconfig.FieldPasswordEncrypted, field.TypeString) + } + if value, ok := _u.mutation.Database(); ok { + _spec.SetField(backupsourceconfig.FieldDatabase, field.TypeString, value) + } + if _u.mutation.DatabaseCleared() { + _spec.ClearField(backupsourceconfig.FieldDatabase, field.TypeString) + } + if value, ok := _u.mutation.SslMode(); ok { + _spec.SetField(backupsourceconfig.FieldSslMode, field.TypeString, value) + } + if _u.mutation.SslModeCleared() { + _spec.ClearField(backupsourceconfig.FieldSslMode, field.TypeString) + } + if value, ok := _u.mutation.Addr(); ok { + _spec.SetField(backupsourceconfig.FieldAddr, field.TypeString, value) + } + if _u.mutation.AddrCleared() { + _spec.ClearField(backupsourceconfig.FieldAddr, field.TypeString) + } + if value, ok := _u.mutation.RedisDb(); ok { + _spec.SetField(backupsourceconfig.FieldRedisDb, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedRedisDb(); ok { + _spec.AddField(backupsourceconfig.FieldRedisDb, field.TypeInt, value) + } + if _u.mutation.RedisDbCleared() { + _spec.ClearField(backupsourceconfig.FieldRedisDb, field.TypeInt) + } + if value, ok := _u.mutation.ContainerName(); ok { + _spec.SetField(backupsourceconfig.FieldContainerName, field.TypeString, value) + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(backupsourceconfig.FieldUpdatedAt, field.TypeTime, value) 
+ } + _node = &BackupSourceConfig{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{backupsourceconfig.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backup/ent/client.go b/backup/ent/client.go new file mode 100644 index 000000000..3e6741ab4 --- /dev/null +++ b/backup/ent/client.go @@ -0,0 +1,947 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + + "github.com/Wei-Shaw/sub2api/backup/ent/migrate" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" + "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // BackupJob is the client for interacting with the BackupJob builders. + BackupJob *BackupJobClient + // BackupJobEvent is the client for interacting with the BackupJobEvent builders. + BackupJobEvent *BackupJobEventClient + // BackupS3Config is the client for interacting with the BackupS3Config builders. + BackupS3Config *BackupS3ConfigClient + // BackupSetting is the client for interacting with the BackupSetting builders. + BackupSetting *BackupSettingClient + // BackupSourceConfig is the client for interacting with the BackupSourceConfig builders. 
+ BackupSourceConfig *BackupSourceConfigClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + client := &Client{config: newConfig(opts...)} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.BackupJob = NewBackupJobClient(c.config) + c.BackupJobEvent = NewBackupJobEventClient(c.config) + c.BackupS3Config = NewBackupS3ConfigClient(c.config) + c.BackupSetting = NewBackupSettingClient(c.config) + c.BackupSourceConfig = NewBackupSourceConfigClient(c.config) +} + +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. + driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. 
+func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + +// Tx returns a new transactional client. The provided context +// is used until the transaction is committed or rolled back. +func (c *Client) Tx(ctx context.Context) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, ErrTxStarted + } + tx, err := newTx(ctx, c.driver) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = tx + return &Tx{ + ctx: ctx, + config: cfg, + BackupJob: NewBackupJobClient(cfg), + BackupJobEvent: NewBackupJobEventClient(cfg), + BackupS3Config: NewBackupS3ConfigClient(cfg), + BackupSetting: NewBackupSettingClient(cfg), + BackupSourceConfig: NewBackupSourceConfigClient(cfg), + }, nil +} + +// BeginTx returns a transactional client with specified options. 
+func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + if _, ok := c.driver.(*txDriver); ok { + return nil, errors.New("ent: cannot start a transaction within a transaction") + } + tx, err := c.driver.(interface { + BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) + }).BeginTx(ctx, opts) + if err != nil { + return nil, fmt.Errorf("ent: starting a transaction: %w", err) + } + cfg := c.config + cfg.driver = &txDriver{tx: tx, drv: c.driver} + return &Tx{ + ctx: ctx, + config: cfg, + BackupJob: NewBackupJobClient(cfg), + BackupJobEvent: NewBackupJobEventClient(cfg), + BackupS3Config: NewBackupS3ConfigClient(cfg), + BackupSetting: NewBackupSettingClient(cfg), + BackupSourceConfig: NewBackupSourceConfigClient(cfg), + }, nil +} + +// Debug returns a new debug-client. It's used to get verbose logging on specific operations. +// +// client.Debug(). +// BackupJob. +// Query(). +// Count(ctx) +func (c *Client) Debug() *Client { + if c.debug { + return c + } + cfg := c.config + cfg.driver = dialect.Debug(c.driver, c.log) + client := &Client{config: cfg} + client.init() + return client +} + +// Close closes the database connection and prevents new queries from starting. +func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + c.BackupJob.Use(hooks...) + c.BackupJobEvent.Use(hooks...) + c.BackupS3Config.Use(hooks...) + c.BackupSetting.Use(hooks...) + c.BackupSourceConfig.Use(hooks...) +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + c.BackupJob.Intercept(interceptors...) + c.BackupJobEvent.Intercept(interceptors...) + c.BackupS3Config.Intercept(interceptors...) 
+ c.BackupSetting.Intercept(interceptors...) + c.BackupSourceConfig.Intercept(interceptors...) +} + +// Mutate implements the ent.Mutator interface. +func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *BackupJobMutation: + return c.BackupJob.mutate(ctx, m) + case *BackupJobEventMutation: + return c.BackupJobEvent.mutate(ctx, m) + case *BackupS3ConfigMutation: + return c.BackupS3Config.mutate(ctx, m) + case *BackupSettingMutation: + return c.BackupSetting.mutate(ctx, m) + case *BackupSourceConfigMutation: + return c.BackupSourceConfig.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } +} + +// BackupJobClient is a client for the BackupJob schema. +type BackupJobClient struct { + config +} + +// NewBackupJobClient returns a client for the BackupJob from the given config. +func NewBackupJobClient(c config) *BackupJobClient { + return &BackupJobClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `backupjob.Hooks(f(g(h())))`. +func (c *BackupJobClient) Use(hooks ...Hook) { + c.hooks.BackupJob = append(c.hooks.BackupJob, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `backupjob.Intercept(f(g(h())))`. +func (c *BackupJobClient) Intercept(interceptors ...Interceptor) { + c.inters.BackupJob = append(c.inters.BackupJob, interceptors...) +} + +// Create returns a builder for creating a BackupJob entity. +func (c *BackupJobClient) Create() *BackupJobCreate { + mutation := newBackupJobMutation(c.config, OpCreate) + return &BackupJobCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of BackupJob entities. 
+func (c *BackupJobClient) CreateBulk(builders ...*BackupJobCreate) *BackupJobCreateBulk { + return &BackupJobCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *BackupJobClient) MapCreateBulk(slice any, setFunc func(*BackupJobCreate, int)) *BackupJobCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &BackupJobCreateBulk{err: fmt.Errorf("calling to BackupJobClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*BackupJobCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &BackupJobCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for BackupJob. +func (c *BackupJobClient) Update() *BackupJobUpdate { + mutation := newBackupJobMutation(c.config, OpUpdate) + return &BackupJobUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *BackupJobClient) UpdateOne(_m *BackupJob) *BackupJobUpdateOne { + mutation := newBackupJobMutation(c.config, OpUpdateOne, withBackupJob(_m)) + return &BackupJobUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *BackupJobClient) UpdateOneID(id int) *BackupJobUpdateOne { + mutation := newBackupJobMutation(c.config, OpUpdateOne, withBackupJobID(id)) + return &BackupJobUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for BackupJob. 
+func (c *BackupJobClient) Delete() *BackupJobDelete { + mutation := newBackupJobMutation(c.config, OpDelete) + return &BackupJobDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *BackupJobClient) DeleteOne(_m *BackupJob) *BackupJobDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *BackupJobClient) DeleteOneID(id int) *BackupJobDeleteOne { + builder := c.Delete().Where(backupjob.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &BackupJobDeleteOne{builder} +} + +// Query returns a query builder for BackupJob. +func (c *BackupJobClient) Query() *BackupJobQuery { + return &BackupJobQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeBackupJob}, + inters: c.Interceptors(), + } +} + +// Get returns a BackupJob entity by its id. +func (c *BackupJobClient) Get(ctx context.Context, id int) (*BackupJob, error) { + return c.Query().Where(backupjob.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *BackupJobClient) GetX(ctx context.Context, id int) *BackupJob { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryEvents queries the events edge of a BackupJob. +func (c *BackupJobClient) QueryEvents(_m *BackupJob) *BackupJobEventQuery { + query := (&BackupJobEventClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(backupjob.Table, backupjob.FieldID, id), + sqlgraph.To(backupjobevent.Table, backupjobevent.FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, backupjob.EventsTable, backupjob.EventsColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. 
+func (c *BackupJobClient) Hooks() []Hook { + return c.hooks.BackupJob +} + +// Interceptors returns the client interceptors. +func (c *BackupJobClient) Interceptors() []Interceptor { + return c.inters.BackupJob +} + +func (c *BackupJobClient) mutate(ctx context.Context, m *BackupJobMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&BackupJobCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&BackupJobUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&BackupJobUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&BackupJobDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown BackupJob mutation op: %q", m.Op()) + } +} + +// BackupJobEventClient is a client for the BackupJobEvent schema. +type BackupJobEventClient struct { + config +} + +// NewBackupJobEventClient returns a client for the BackupJobEvent from the given config. +func NewBackupJobEventClient(c config) *BackupJobEventClient { + return &BackupJobEventClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `backupjobevent.Hooks(f(g(h())))`. +func (c *BackupJobEventClient) Use(hooks ...Hook) { + c.hooks.BackupJobEvent = append(c.hooks.BackupJobEvent, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `backupjobevent.Intercept(f(g(h())))`. +func (c *BackupJobEventClient) Intercept(interceptors ...Interceptor) { + c.inters.BackupJobEvent = append(c.inters.BackupJobEvent, interceptors...) +} + +// Create returns a builder for creating a BackupJobEvent entity. 
+func (c *BackupJobEventClient) Create() *BackupJobEventCreate { + mutation := newBackupJobEventMutation(c.config, OpCreate) + return &BackupJobEventCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of BackupJobEvent entities. +func (c *BackupJobEventClient) CreateBulk(builders ...*BackupJobEventCreate) *BackupJobEventCreateBulk { + return &BackupJobEventCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *BackupJobEventClient) MapCreateBulk(slice any, setFunc func(*BackupJobEventCreate, int)) *BackupJobEventCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &BackupJobEventCreateBulk{err: fmt.Errorf("calling to BackupJobEventClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*BackupJobEventCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &BackupJobEventCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for BackupJobEvent. +func (c *BackupJobEventClient) Update() *BackupJobEventUpdate { + mutation := newBackupJobEventMutation(c.config, OpUpdate) + return &BackupJobEventUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *BackupJobEventClient) UpdateOne(_m *BackupJobEvent) *BackupJobEventUpdateOne { + mutation := newBackupJobEventMutation(c.config, OpUpdateOne, withBackupJobEvent(_m)) + return &BackupJobEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. 
+func (c *BackupJobEventClient) UpdateOneID(id int) *BackupJobEventUpdateOne { + mutation := newBackupJobEventMutation(c.config, OpUpdateOne, withBackupJobEventID(id)) + return &BackupJobEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for BackupJobEvent. +func (c *BackupJobEventClient) Delete() *BackupJobEventDelete { + mutation := newBackupJobEventMutation(c.config, OpDelete) + return &BackupJobEventDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *BackupJobEventClient) DeleteOne(_m *BackupJobEvent) *BackupJobEventDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *BackupJobEventClient) DeleteOneID(id int) *BackupJobEventDeleteOne { + builder := c.Delete().Where(backupjobevent.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &BackupJobEventDeleteOne{builder} +} + +// Query returns a query builder for BackupJobEvent. +func (c *BackupJobEventClient) Query() *BackupJobEventQuery { + return &BackupJobEventQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeBackupJobEvent}, + inters: c.Interceptors(), + } +} + +// Get returns a BackupJobEvent entity by its id. +func (c *BackupJobEventClient) Get(ctx context.Context, id int) (*BackupJobEvent, error) { + return c.Query().Where(backupjobevent.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *BackupJobEventClient) GetX(ctx context.Context, id int) *BackupJobEvent { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryJob queries the job edge of a BackupJobEvent. 
+func (c *BackupJobEventClient) QueryJob(_m *BackupJobEvent) *BackupJobQuery { + query := (&BackupJobClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := _m.ID + step := sqlgraph.NewStep( + sqlgraph.From(backupjobevent.Table, backupjobevent.FieldID, id), + sqlgraph.To(backupjob.Table, backupjob.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, backupjobevent.JobTable, backupjobevent.JobColumn), + ) + fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *BackupJobEventClient) Hooks() []Hook { + return c.hooks.BackupJobEvent +} + +// Interceptors returns the client interceptors. +func (c *BackupJobEventClient) Interceptors() []Interceptor { + return c.inters.BackupJobEvent +} + +func (c *BackupJobEventClient) mutate(ctx context.Context, m *BackupJobEventMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&BackupJobEventCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&BackupJobEventUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&BackupJobEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&BackupJobEventDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown BackupJobEvent mutation op: %q", m.Op()) + } +} + +// BackupS3ConfigClient is a client for the BackupS3Config schema. +type BackupS3ConfigClient struct { + config +} + +// NewBackupS3ConfigClient returns a client for the BackupS3Config from the given config. +func NewBackupS3ConfigClient(c config) *BackupS3ConfigClient { + return &BackupS3ConfigClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `backups3config.Hooks(f(g(h())))`. 
+func (c *BackupS3ConfigClient) Use(hooks ...Hook) { + c.hooks.BackupS3Config = append(c.hooks.BackupS3Config, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `backups3config.Intercept(f(g(h())))`. +func (c *BackupS3ConfigClient) Intercept(interceptors ...Interceptor) { + c.inters.BackupS3Config = append(c.inters.BackupS3Config, interceptors...) +} + +// Create returns a builder for creating a BackupS3Config entity. +func (c *BackupS3ConfigClient) Create() *BackupS3ConfigCreate { + mutation := newBackupS3ConfigMutation(c.config, OpCreate) + return &BackupS3ConfigCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of BackupS3Config entities. +func (c *BackupS3ConfigClient) CreateBulk(builders ...*BackupS3ConfigCreate) *BackupS3ConfigCreateBulk { + return &BackupS3ConfigCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *BackupS3ConfigClient) MapCreateBulk(slice any, setFunc func(*BackupS3ConfigCreate, int)) *BackupS3ConfigCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &BackupS3ConfigCreateBulk{err: fmt.Errorf("calling to BackupS3ConfigClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*BackupS3ConfigCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &BackupS3ConfigCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for BackupS3Config. 
+func (c *BackupS3ConfigClient) Update() *BackupS3ConfigUpdate { + mutation := newBackupS3ConfigMutation(c.config, OpUpdate) + return &BackupS3ConfigUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *BackupS3ConfigClient) UpdateOne(_m *BackupS3Config) *BackupS3ConfigUpdateOne { + mutation := newBackupS3ConfigMutation(c.config, OpUpdateOne, withBackupS3Config(_m)) + return &BackupS3ConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *BackupS3ConfigClient) UpdateOneID(id int) *BackupS3ConfigUpdateOne { + mutation := newBackupS3ConfigMutation(c.config, OpUpdateOne, withBackupS3ConfigID(id)) + return &BackupS3ConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for BackupS3Config. +func (c *BackupS3ConfigClient) Delete() *BackupS3ConfigDelete { + mutation := newBackupS3ConfigMutation(c.config, OpDelete) + return &BackupS3ConfigDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *BackupS3ConfigClient) DeleteOne(_m *BackupS3Config) *BackupS3ConfigDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *BackupS3ConfigClient) DeleteOneID(id int) *BackupS3ConfigDeleteOne { + builder := c.Delete().Where(backups3config.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &BackupS3ConfigDeleteOne{builder} +} + +// Query returns a query builder for BackupS3Config. +func (c *BackupS3ConfigClient) Query() *BackupS3ConfigQuery { + return &BackupS3ConfigQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeBackupS3Config}, + inters: c.Interceptors(), + } +} + +// Get returns a BackupS3Config entity by its id. 
+func (c *BackupS3ConfigClient) Get(ctx context.Context, id int) (*BackupS3Config, error) { + return c.Query().Where(backups3config.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *BackupS3ConfigClient) GetX(ctx context.Context, id int) *BackupS3Config { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *BackupS3ConfigClient) Hooks() []Hook { + return c.hooks.BackupS3Config +} + +// Interceptors returns the client interceptors. +func (c *BackupS3ConfigClient) Interceptors() []Interceptor { + return c.inters.BackupS3Config +} + +func (c *BackupS3ConfigClient) mutate(ctx context.Context, m *BackupS3ConfigMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&BackupS3ConfigCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&BackupS3ConfigUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&BackupS3ConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&BackupS3ConfigDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown BackupS3Config mutation op: %q", m.Op()) + } +} + +// BackupSettingClient is a client for the BackupSetting schema. +type BackupSettingClient struct { + config +} + +// NewBackupSettingClient returns a client for the BackupSetting from the given config. +func NewBackupSettingClient(c config) *BackupSettingClient { + return &BackupSettingClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `backupsetting.Hooks(f(g(h())))`. +func (c *BackupSettingClient) Use(hooks ...Hook) { + c.hooks.BackupSetting = append(c.hooks.BackupSetting, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. 
+// A call to `Intercept(f, g, h)` equals to `backupsetting.Intercept(f(g(h())))`. +func (c *BackupSettingClient) Intercept(interceptors ...Interceptor) { + c.inters.BackupSetting = append(c.inters.BackupSetting, interceptors...) +} + +// Create returns a builder for creating a BackupSetting entity. +func (c *BackupSettingClient) Create() *BackupSettingCreate { + mutation := newBackupSettingMutation(c.config, OpCreate) + return &BackupSettingCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of BackupSetting entities. +func (c *BackupSettingClient) CreateBulk(builders ...*BackupSettingCreate) *BackupSettingCreateBulk { + return &BackupSettingCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *BackupSettingClient) MapCreateBulk(slice any, setFunc func(*BackupSettingCreate, int)) *BackupSettingCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &BackupSettingCreateBulk{err: fmt.Errorf("calling to BackupSettingClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*BackupSettingCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &BackupSettingCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for BackupSetting. +func (c *BackupSettingClient) Update() *BackupSettingUpdate { + mutation := newBackupSettingMutation(c.config, OpUpdate) + return &BackupSettingUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *BackupSettingClient) UpdateOne(_m *BackupSetting) *BackupSettingUpdateOne { + mutation := newBackupSettingMutation(c.config, OpUpdateOne, withBackupSetting(_m)) + return &BackupSettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *BackupSettingClient) UpdateOneID(id int) *BackupSettingUpdateOne { + mutation := newBackupSettingMutation(c.config, OpUpdateOne, withBackupSettingID(id)) + return &BackupSettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for BackupSetting. +func (c *BackupSettingClient) Delete() *BackupSettingDelete { + mutation := newBackupSettingMutation(c.config, OpDelete) + return &BackupSettingDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *BackupSettingClient) DeleteOne(_m *BackupSetting) *BackupSettingDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *BackupSettingClient) DeleteOneID(id int) *BackupSettingDeleteOne { + builder := c.Delete().Where(backupsetting.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &BackupSettingDeleteOne{builder} +} + +// Query returns a query builder for BackupSetting. +func (c *BackupSettingClient) Query() *BackupSettingQuery { + return &BackupSettingQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeBackupSetting}, + inters: c.Interceptors(), + } +} + +// Get returns a BackupSetting entity by its id. +func (c *BackupSettingClient) Get(ctx context.Context, id int) (*BackupSetting, error) { + return c.Query().Where(backupsetting.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *BackupSettingClient) GetX(ctx context.Context, id int) *BackupSetting { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *BackupSettingClient) Hooks() []Hook { + return c.hooks.BackupSetting +} + +// Interceptors returns the client interceptors. +func (c *BackupSettingClient) Interceptors() []Interceptor { + return c.inters.BackupSetting +} + +func (c *BackupSettingClient) mutate(ctx context.Context, m *BackupSettingMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&BackupSettingCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&BackupSettingUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&BackupSettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&BackupSettingDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown BackupSetting mutation op: %q", m.Op()) + } +} + +// BackupSourceConfigClient is a client for the BackupSourceConfig schema. +type BackupSourceConfigClient struct { + config +} + +// NewBackupSourceConfigClient returns a client for the BackupSourceConfig from the given config. +func NewBackupSourceConfigClient(c config) *BackupSourceConfigClient { + return &BackupSourceConfigClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `backupsourceconfig.Hooks(f(g(h())))`. +func (c *BackupSourceConfigClient) Use(hooks ...Hook) { + c.hooks.BackupSourceConfig = append(c.hooks.BackupSourceConfig, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `backupsourceconfig.Intercept(f(g(h())))`. 
+func (c *BackupSourceConfigClient) Intercept(interceptors ...Interceptor) { + c.inters.BackupSourceConfig = append(c.inters.BackupSourceConfig, interceptors...) +} + +// Create returns a builder for creating a BackupSourceConfig entity. +func (c *BackupSourceConfigClient) Create() *BackupSourceConfigCreate { + mutation := newBackupSourceConfigMutation(c.config, OpCreate) + return &BackupSourceConfigCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of BackupSourceConfig entities. +func (c *BackupSourceConfigClient) CreateBulk(builders ...*BackupSourceConfigCreate) *BackupSourceConfigCreateBulk { + return &BackupSourceConfigCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *BackupSourceConfigClient) MapCreateBulk(slice any, setFunc func(*BackupSourceConfigCreate, int)) *BackupSourceConfigCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &BackupSourceConfigCreateBulk{err: fmt.Errorf("calling to BackupSourceConfigClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*BackupSourceConfigCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &BackupSourceConfigCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for BackupSourceConfig. +func (c *BackupSourceConfigClient) Update() *BackupSourceConfigUpdate { + mutation := newBackupSourceConfigMutation(c.config, OpUpdate) + return &BackupSourceConfigUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *BackupSourceConfigClient) UpdateOne(_m *BackupSourceConfig) *BackupSourceConfigUpdateOne { + mutation := newBackupSourceConfigMutation(c.config, OpUpdateOne, withBackupSourceConfig(_m)) + return &BackupSourceConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *BackupSourceConfigClient) UpdateOneID(id int) *BackupSourceConfigUpdateOne { + mutation := newBackupSourceConfigMutation(c.config, OpUpdateOne, withBackupSourceConfigID(id)) + return &BackupSourceConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for BackupSourceConfig. +func (c *BackupSourceConfigClient) Delete() *BackupSourceConfigDelete { + mutation := newBackupSourceConfigMutation(c.config, OpDelete) + return &BackupSourceConfigDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *BackupSourceConfigClient) DeleteOne(_m *BackupSourceConfig) *BackupSourceConfigDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *BackupSourceConfigClient) DeleteOneID(id int) *BackupSourceConfigDeleteOne { + builder := c.Delete().Where(backupsourceconfig.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &BackupSourceConfigDeleteOne{builder} +} + +// Query returns a query builder for BackupSourceConfig. +func (c *BackupSourceConfigClient) Query() *BackupSourceConfigQuery { + return &BackupSourceConfigQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeBackupSourceConfig}, + inters: c.Interceptors(), + } +} + +// Get returns a BackupSourceConfig entity by its id. 
+func (c *BackupSourceConfigClient) Get(ctx context.Context, id int) (*BackupSourceConfig, error) { + return c.Query().Where(backupsourceconfig.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *BackupSourceConfigClient) GetX(ctx context.Context, id int) *BackupSourceConfig { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *BackupSourceConfigClient) Hooks() []Hook { + return c.hooks.BackupSourceConfig +} + +// Interceptors returns the client interceptors. +func (c *BackupSourceConfigClient) Interceptors() []Interceptor { + return c.inters.BackupSourceConfig +} + +func (c *BackupSourceConfigClient) mutate(ctx context.Context, m *BackupSourceConfigMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&BackupSourceConfigCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&BackupSourceConfigUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&BackupSourceConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&BackupSourceConfigDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown BackupSourceConfig mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + BackupJob, BackupJobEvent, BackupS3Config, BackupSetting, + BackupSourceConfig []ent.Hook + } + inters struct { + BackupJob, BackupJobEvent, BackupS3Config, BackupSetting, + BackupSourceConfig []ent.Interceptor + } +) diff --git a/backup/ent/ent.go b/backup/ent/ent.go new file mode 100644 index 000000000..b6d986d43 --- /dev/null +++ b/backup/ent/ent.go @@ -0,0 +1,616 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "reflect" + "sync" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" + "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" +) + +// ent aliases to avoid import conflicts in user's code. +type ( + Op = ent.Op + Hook = ent.Hook + Value = ent.Value + Query = ent.Query + QueryContext = ent.QueryContext + Querier = ent.Querier + QuerierFunc = ent.QuerierFunc + Interceptor = ent.Interceptor + InterceptFunc = ent.InterceptFunc + Traverser = ent.Traverser + TraverseFunc = ent.TraverseFunc + Policy = ent.Policy + Mutator = ent.Mutator + Mutation = ent.Mutation + MutateFunc = ent.MutateFunc +) + +type clientCtxKey struct{} + +// FromContext returns a Client stored inside a context, or nil if there isn't one. +func FromContext(ctx context.Context) *Client { + c, _ := ctx.Value(clientCtxKey{}).(*Client) + return c +} + +// NewContext returns a new context with the given Client attached. +func NewContext(parent context.Context, c *Client) context.Context { + return context.WithValue(parent, clientCtxKey{}, c) +} + +type txCtxKey struct{} + +// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. +func TxFromContext(ctx context.Context) *Tx { + tx, _ := ctx.Value(txCtxKey{}).(*Tx) + return tx +} + +// NewTxContext returns a new context with the given Tx attached. +func NewTxContext(parent context.Context, tx *Tx) context.Context { + return context.WithValue(parent, txCtxKey{}, tx) +} + +// OrderFunc applies an ordering on the sql selector. +// Deprecated: Use Asc/Desc functions or the package builders instead. 
+type OrderFunc func(*sql.Selector) + +var ( + initCheck sync.Once + columnCheck sql.ColumnCheck +) + +// checkColumn checks if the column exists in the given table. +func checkColumn(t, c string) error { + initCheck.Do(func() { + columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ + backupjob.Table: backupjob.ValidColumn, + backupjobevent.Table: backupjobevent.ValidColumn, + backups3config.Table: backups3config.ValidColumn, + backupsetting.Table: backupsetting.ValidColumn, + backupsourceconfig.Table: backupsourceconfig.ValidColumn, + }) + }) + return columnCheck(t, c) +} + +// Asc applies the given fields in ASC order. +func Asc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Asc(s.C(f))) + } + } +} + +// Desc applies the given fields in DESC order. +func Desc(fields ...string) func(*sql.Selector) { + return func(s *sql.Selector) { + for _, f := range fields { + if err := checkColumn(s.TableName(), f); err != nil { + s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) + } + s.OrderBy(sql.Desc(s.C(f))) + } + } +} + +// AggregateFunc applies an aggregation step on the group-by traversal/selector. +type AggregateFunc func(*sql.Selector) string + +// As is a pseudo aggregation function for renaming another other functions with custom names. For example: +// +// GroupBy(field1, field2). +// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")). +// Scan(ctx, &v) +func As(fn AggregateFunc, end string) AggregateFunc { + return func(s *sql.Selector) string { + return sql.As(fn(s), end) + } +} + +// Count applies the "count" aggregation function on each group. 
+func Count() AggregateFunc { + return func(s *sql.Selector) string { + return sql.Count("*") + } +} + +// Max applies the "max" aggregation function on the given field of each group. +func Max(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Max(s.C(field)) + } +} + +// Mean applies the "mean" aggregation function on the given field of each group. +func Mean(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Avg(s.C(field)) + } +} + +// Min applies the "min" aggregation function on the given field of each group. +func Min(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Min(s.C(field)) + } +} + +// Sum applies the "sum" aggregation function on the given field of each group. +func Sum(field string) AggregateFunc { + return func(s *sql.Selector) string { + if err := checkColumn(s.TableName(), field); err != nil { + s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) + return "" + } + return sql.Sum(s.C(field)) + } +} + +// ValidationError returns when validating a field or edge fails. +type ValidationError struct { + Name string // Field or edge name. + err error +} + +// Error implements the error interface. +func (e *ValidationError) Error() string { + return e.err.Error() +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ValidationError) Unwrap() error { + return e.err +} + +// IsValidationError returns a boolean indicating whether the error is a validation error. 
+func IsValidationError(err error) bool { + if err == nil { + return false + } + var e *ValidationError + return errors.As(err, &e) +} + +// NotFoundError returns when trying to fetch a specific entity and it was not found in the database. +type NotFoundError struct { + label string +} + +// Error implements the error interface. +func (e *NotFoundError) Error() string { + return "ent: " + e.label + " not found" +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + if err == nil { + return false + } + var e *NotFoundError + return errors.As(err, &e) +} + +// MaskNotFound masks not found error. +func MaskNotFound(err error) error { + if IsNotFound(err) { + return nil + } + return err +} + +// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database. +type NotSingularError struct { + label string +} + +// Error implements the error interface. +func (e *NotSingularError) Error() string { + return "ent: " + e.label + " not singular" +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. +func IsNotSingular(err error) bool { + if err == nil { + return false + } + var e *NotSingularError + return errors.As(err, &e) +} + +// NotLoadedError returns when trying to get a node that was not loaded by the query. +type NotLoadedError struct { + edge string +} + +// Error implements the error interface. +func (e *NotLoadedError) Error() string { + return "ent: " + e.edge + " edge was not loaded" +} + +// IsNotLoaded returns a boolean indicating whether the error is a not loaded error. +func IsNotLoaded(err error) bool { + if err == nil { + return false + } + var e *NotLoadedError + return errors.As(err, &e) +} + +// ConstraintError returns when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or +// field uniqueness. 
+type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ConstraintError) Error() string { + return "ent: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + fns []AggregateFunc + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field. +func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. 
+func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
+func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. +func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// withHooks invokes the builder operation with the given hooks, if any. 
+func withHooks[V Value, M any, PM interface { + *M + Mutation +}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { + if len(hooks) == 0 { + return exec(ctx) + } + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutationT, ok := any(m).(PM) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + // Set the mutation to the builder. + *mutation = *mutationT + return exec(ctx) + }) + for i := len(hooks) - 1; i >= 0; i-- { + if hooks[i] == nil { + return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") + } + mut = hooks[i](mut) + } + v, err := mut.Mutate(ctx, mutation) + if err != nil { + return value, err + } + nv, ok := v.(V) + if !ok { + return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) + } + return nv, nil +} + +// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. 
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/backup/ent/enttest/enttest.go b/backup/ent/enttest/enttest.go new file mode 100644 index 000000000..975ea37b9 --- /dev/null +++ b/backup/ent/enttest/enttest.go @@ -0,0 +1,84 @@ +// Code generated by ent, DO NOT EDIT. + +package enttest + +import ( + "context" + + "github.com/Wei-Shaw/sub2api/backup/ent" + // required by schema hooks. + _ "github.com/Wei-Shaw/sub2api/backup/ent/runtime" + + "entgo.io/ent/dialect/sql/schema" + "github.com/Wei-Shaw/sub2api/backup/ent/migrate" +) + +type ( + // TestingT is the interface that is shared between + // testing.T and testing.B and used by enttest. + TestingT interface { + FailNow() + Error(...any) + } + + // Option configures client creation. 
+ Option func(*options) + + options struct { + opts []ent.Option + migrateOpts []schema.MigrateOption + } +) + +// WithOptions forwards options to client creation. +func WithOptions(opts ...ent.Option) Option { + return func(o *options) { + o.opts = append(o.opts, opts...) + } +} + +// WithMigrateOptions forwards options to auto migration. +func WithMigrateOptions(opts ...schema.MigrateOption) Option { + return func(o *options) { + o.migrateOpts = append(o.migrateOpts, opts...) + } +} + +func newOptions(opts []Option) *options { + o := &options{} + for _, opt := range opts { + opt(o) + } + return o +} + +// Open calls ent.Open and auto-run migration. +func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client { + o := newOptions(opts) + c, err := ent.Open(driverName, dataSourceName, o.opts...) + if err != nil { + t.Error(err) + t.FailNow() + } + migrateSchema(t, c, o) + return c +} + +// NewClient calls ent.NewClient and auto-run migration. +func NewClient(t TestingT, opts ...Option) *ent.Client { + o := newOptions(opts) + c := ent.NewClient(o.opts...) + migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *ent.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } +} diff --git a/backup/ent/generate.go b/backup/ent/generate.go new file mode 100644 index 000000000..9fdb1068c --- /dev/null +++ b/backup/ent/generate.go @@ -0,0 +1,3 @@ +package ent + +//go:generate go run entgo.io/ent/cmd/ent generate ./schema diff --git a/backup/ent/hook/hook.go b/backup/ent/hook/hook.go new file mode 100644 index 000000000..5db0dd794 --- /dev/null +++ b/backup/ent/hook/hook.go @@ -0,0 +1,247 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package hook + +import ( + "context" + "fmt" + + "github.com/Wei-Shaw/sub2api/backup/ent" +) + +// The BackupJobFunc type is an adapter to allow the use of ordinary +// function as BackupJob mutator. +type BackupJobFunc func(context.Context, *ent.BackupJobMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f BackupJobFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.BackupJobMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BackupJobMutation", m) +} + +// The BackupJobEventFunc type is an adapter to allow the use of ordinary +// function as BackupJobEvent mutator. +type BackupJobEventFunc func(context.Context, *ent.BackupJobEventMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f BackupJobEventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.BackupJobEventMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BackupJobEventMutation", m) +} + +// The BackupS3ConfigFunc type is an adapter to allow the use of ordinary +// function as BackupS3Config mutator. +type BackupS3ConfigFunc func(context.Context, *ent.BackupS3ConfigMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f BackupS3ConfigFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.BackupS3ConfigMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BackupS3ConfigMutation", m) +} + +// The BackupSettingFunc type is an adapter to allow the use of ordinary +// function as BackupSetting mutator. +type BackupSettingFunc func(context.Context, *ent.BackupSettingMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). 
+func (f BackupSettingFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.BackupSettingMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BackupSettingMutation", m) +} + +// The BackupSourceConfigFunc type is an adapter to allow the use of ordinary +// function as BackupSourceConfig mutator. +type BackupSourceConfigFunc func(context.Context, *ent.BackupSourceConfigMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f BackupSourceConfigFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.BackupSourceConfigMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BackupSourceConfigMutation", m) +} + +// Condition is a hook condition function. +type Condition func(context.Context, ent.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. +func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op ent.Op) Condition { + return func(_ context.Context, m ent.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. 
+func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. +func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. +func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. +// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk ent.Hook, cond Condition) ent.Hook { + return func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, ent.Delete|ent.Create) +func On(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. +// +// hook.Unless(Log, ent.Update|ent.UpdateOne) +func Unless(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. 
+func FixedError(err error) ent.Hook { + return func(ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. +// +// func (T) Hooks() []ent.Hook { +// return []ent.Hook{ +// Reject(ent.Delete|ent.Update), +// } +// } +func Reject(op ent.Op) ent.Hook { + hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) + return On(hk, op) +} + +// Chain acts as a list of hooks and is effectively immutable. +// Once created, it will always hold the same set of hooks in the same order. +type Chain struct { + hooks []ent.Hook +} + +// NewChain creates a new chain of hooks. +func NewChain(hooks ...ent.Hook) Chain { + return Chain{append([]ent.Hook(nil), hooks...)} +} + +// Hook chains the list of hooks and returns the final hook. +func (c Chain) Hook() ent.Hook { + return func(mutator ent.Mutator) ent.Mutator { + for i := len(c.hooks) - 1; i >= 0; i-- { + mutator = c.hooks[i](mutator) + } + return mutator + } +} + +// Append extends a chain, adding the specified hook +// as the last ones in the mutation flow. +func (c Chain) Append(hooks ...ent.Hook) Chain { + newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks)) + newHooks = append(newHooks, c.hooks...) + newHooks = append(newHooks, hooks...) + return Chain{newHooks} +} + +// Extend extends a chain, adding the specified chain +// as the last ones in the mutation flow. +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.hooks...) +} diff --git a/backup/ent/migrate/migrate.go b/backup/ent/migrate/migrate.go new file mode 100644 index 000000000..1956a6bf6 --- /dev/null +++ b/backup/ent/migrate/migrate.go @@ -0,0 +1,64 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. +func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) 
+} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) +} diff --git a/backup/ent/migrate/schema.go b/backup/ent/migrate/schema.go new file mode 100644 index 000000000..78af4965e --- /dev/null +++ b/backup/ent/migrate/schema.go @@ -0,0 +1,166 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // BackupJobsColumns holds the columns for the "backup_jobs" table. + BackupJobsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "job_id", Type: field.TypeString, Unique: true}, + {Name: "backup_type", Type: field.TypeEnum, Enums: []string{"postgres", "redis", "full"}}, + {Name: "status", Type: field.TypeEnum, Enums: []string{"queued", "running", "succeeded", "failed", "partial_succeeded"}, Default: "queued"}, + {Name: "triggered_by", Type: field.TypeString, Default: "system"}, + {Name: "idempotency_key", Type: field.TypeString, Nullable: true}, + {Name: "upload_to_s3", Type: field.TypeBool, Default: false}, + {Name: "started_at", Type: field.TypeTime, Nullable: true}, + {Name: "finished_at", Type: field.TypeTime, Nullable: true}, + {Name: "error_message", Type: field.TypeString, Nullable: true}, + {Name: "artifact_local_path", Type: field.TypeString, Nullable: true}, + {Name: "artifact_size_bytes", Type: field.TypeInt64, Nullable: true}, + {Name: "artifact_sha256", Type: field.TypeString, Nullable: true}, + {Name: "s3_bucket", Type: field.TypeString, Nullable: true}, + {Name: "s3_key", Type: field.TypeString, Nullable: true}, + {Name: "s3_etag", Type: field.TypeString, Nullable: true}, + 
{Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + } + // BackupJobsTable holds the schema information for the "backup_jobs" table. + BackupJobsTable = &schema.Table{ + Name: "backup_jobs", + Columns: BackupJobsColumns, + PrimaryKey: []*schema.Column{BackupJobsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "backupjob_status_created_at", + Unique: false, + Columns: []*schema.Column{BackupJobsColumns[3], BackupJobsColumns[16]}, + }, + { + Name: "backupjob_backup_type_created_at", + Unique: false, + Columns: []*schema.Column{BackupJobsColumns[2], BackupJobsColumns[16]}, + }, + { + Name: "backupjob_idempotency_key", + Unique: false, + Columns: []*schema.Column{BackupJobsColumns[5]}, + }, + }, + } + // BackupJobEventsColumns holds the columns for the "backup_job_events" table. + BackupJobEventsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "level", Type: field.TypeEnum, Enums: []string{"info", "warning", "error"}, Default: "info"}, + {Name: "event_type", Type: field.TypeString, Default: "state_change"}, + {Name: "message", Type: field.TypeString}, + {Name: "payload", Type: field.TypeString, Nullable: true}, + {Name: "event_time", Type: field.TypeTime}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "backup_job_id", Type: field.TypeInt}, + } + // BackupJobEventsTable holds the schema information for the "backup_job_events" table. 
+ BackupJobEventsTable = &schema.Table{ + Name: "backup_job_events", + Columns: BackupJobEventsColumns, + PrimaryKey: []*schema.Column{BackupJobEventsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "backup_job_events_backup_jobs_job", + Columns: []*schema.Column{BackupJobEventsColumns[7]}, + RefColumns: []*schema.Column{BackupJobsColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "backupjobevent_backup_job_id_event_time", + Unique: false, + Columns: []*schema.Column{BackupJobEventsColumns[7], BackupJobEventsColumns[5]}, + }, + }, + } + // BackupS3configsColumns holds the columns for the "backup_s3configs" table. + BackupS3configsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "enabled", Type: field.TypeBool, Default: false}, + {Name: "endpoint", Type: field.TypeString, Default: ""}, + {Name: "region", Type: field.TypeString, Default: ""}, + {Name: "bucket", Type: field.TypeString, Default: ""}, + {Name: "access_key_id", Type: field.TypeString, Default: ""}, + {Name: "secret_access_key_encrypted", Type: field.TypeString, Nullable: true}, + {Name: "prefix", Type: field.TypeString, Default: ""}, + {Name: "force_path_style", Type: field.TypeBool, Default: false}, + {Name: "use_ssl", Type: field.TypeBool, Default: true}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + } + // BackupS3configsTable holds the schema information for the "backup_s3configs" table. + BackupS3configsTable = &schema.Table{ + Name: "backup_s3configs", + Columns: BackupS3configsColumns, + PrimaryKey: []*schema.Column{BackupS3configsColumns[0]}, + } + // BackupSettingsColumns holds the columns for the "backup_settings" table. 
+ BackupSettingsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "source_mode", Type: field.TypeEnum, Enums: []string{"direct", "docker_exec"}, Default: "direct"}, + {Name: "backup_root", Type: field.TypeString, Default: "/var/lib/sub2api/backups"}, + {Name: "retention_days", Type: field.TypeInt, Default: 7}, + {Name: "keep_last", Type: field.TypeInt, Default: 30}, + {Name: "sqlite_path", Type: field.TypeString, Default: "/var/lib/sub2api/backupd.db"}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + } + // BackupSettingsTable holds the schema information for the "backup_settings" table. + BackupSettingsTable = &schema.Table{ + Name: "backup_settings", + Columns: BackupSettingsColumns, + PrimaryKey: []*schema.Column{BackupSettingsColumns[0]}, + } + // BackupSourceConfigsColumns holds the columns for the "backup_source_configs" table. + BackupSourceConfigsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "source_type", Type: field.TypeEnum, Enums: []string{"postgres", "redis"}}, + {Name: "host", Type: field.TypeString, Nullable: true}, + {Name: "port", Type: field.TypeInt, Nullable: true}, + {Name: "username", Type: field.TypeString, Nullable: true}, + {Name: "password_encrypted", Type: field.TypeString, Nullable: true}, + {Name: "database", Type: field.TypeString, Nullable: true}, + {Name: "ssl_mode", Type: field.TypeString, Nullable: true}, + {Name: "addr", Type: field.TypeString, Nullable: true}, + {Name: "redis_db", Type: field.TypeInt, Nullable: true}, + {Name: "container_name", Type: field.TypeString, Default: ""}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + } + // BackupSourceConfigsTable holds the schema information for the "backup_source_configs" table. 
+ BackupSourceConfigsTable = &schema.Table{ + Name: "backup_source_configs", + Columns: BackupSourceConfigsColumns, + PrimaryKey: []*schema.Column{BackupSourceConfigsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "backupsourceconfig_source_type", + Unique: true, + Columns: []*schema.Column{BackupSourceConfigsColumns[1]}, + }, + }, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + BackupJobsTable, + BackupJobEventsTable, + BackupS3configsTable, + BackupSettingsTable, + BackupSourceConfigsTable, + } +) + +func init() { + BackupJobEventsTable.ForeignKeys[0].RefTable = BackupJobsTable +} diff --git a/backup/ent/mutation.go b/backup/ent/mutation.go new file mode 100644 index 000000000..351ecea0d --- /dev/null +++ b/backup/ent/mutation.go @@ -0,0 +1,5044 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" + "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" + "github.com/Wei-Shaw/sub2api/backup/ent/predicate" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeBackupJob = "BackupJob" + TypeBackupJobEvent = "BackupJobEvent" + TypeBackupS3Config = "BackupS3Config" + TypeBackupSetting = "BackupSetting" + TypeBackupSourceConfig = "BackupSourceConfig" +) + +// BackupJobMutation represents an operation that mutates the BackupJob nodes in the graph. 
+type BackupJobMutation struct { + config + op Op + typ string + id *int + job_id *string + backup_type *backupjob.BackupType + status *backupjob.Status + triggered_by *string + idempotency_key *string + upload_to_s3 *bool + started_at *time.Time + finished_at *time.Time + error_message *string + artifact_local_path *string + artifact_size_bytes *int64 + addartifact_size_bytes *int64 + artifact_sha256 *string + s3_bucket *string + s3_key *string + s3_etag *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + events map[int]struct{} + removedevents map[int]struct{} + clearedevents bool + done bool + oldValue func(context.Context) (*BackupJob, error) + predicates []predicate.BackupJob +} + +var _ ent.Mutation = (*BackupJobMutation)(nil) + +// backupjobOption allows management of the mutation configuration using functional options. +type backupjobOption func(*BackupJobMutation) + +// newBackupJobMutation creates new mutation for the BackupJob entity. +func newBackupJobMutation(c config, op Op, opts ...backupjobOption) *BackupJobMutation { + m := &BackupJobMutation{ + config: c, + op: op, + typ: TypeBackupJob, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withBackupJobID sets the ID field of the mutation. +func withBackupJobID(id int) backupjobOption { + return func(m *BackupJobMutation) { + var ( + err error + once sync.Once + value *BackupJob + ) + m.oldValue = func(ctx context.Context) (*BackupJob, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().BackupJob.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withBackupJob sets the old BackupJob of the mutation. 
+func withBackupJob(node *BackupJob) backupjobOption { + return func(m *BackupJobMutation) { + m.oldValue = func(context.Context) (*BackupJob, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m BackupJobMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m BackupJobMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *BackupJobMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *BackupJobMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().BackupJob.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetJobID sets the "job_id" field. +func (m *BackupJobMutation) SetJobID(s string) { + m.job_id = &s +} + +// JobID returns the value of the "job_id" field in the mutation. 
+func (m *BackupJobMutation) JobID() (r string, exists bool) { + v := m.job_id + if v == nil { + return + } + return *v, true +} + +// OldJobID returns the old "job_id" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldJobID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldJobID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldJobID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldJobID: %w", err) + } + return oldValue.JobID, nil +} + +// ResetJobID resets all changes to the "job_id" field. +func (m *BackupJobMutation) ResetJobID() { + m.job_id = nil +} + +// SetBackupType sets the "backup_type" field. +func (m *BackupJobMutation) SetBackupType(bt backupjob.BackupType) { + m.backup_type = &bt +} + +// BackupType returns the value of the "backup_type" field in the mutation. +func (m *BackupJobMutation) BackupType() (r backupjob.BackupType, exists bool) { + v := m.backup_type + if v == nil { + return + } + return *v, true +} + +// OldBackupType returns the old "backup_type" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobMutation) OldBackupType(ctx context.Context) (v backupjob.BackupType, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBackupType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBackupType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBackupType: %w", err) + } + return oldValue.BackupType, nil +} + +// ResetBackupType resets all changes to the "backup_type" field. +func (m *BackupJobMutation) ResetBackupType() { + m.backup_type = nil +} + +// SetStatus sets the "status" field. +func (m *BackupJobMutation) SetStatus(b backupjob.Status) { + m.status = &b +} + +// Status returns the value of the "status" field in the mutation. +func (m *BackupJobMutation) Status() (r backupjob.Status, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldStatus(ctx context.Context) (v backupjob.Status, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *BackupJobMutation) ResetStatus() { + m.status = nil +} + +// SetTriggeredBy sets the "triggered_by" field. 
+func (m *BackupJobMutation) SetTriggeredBy(s string) { + m.triggered_by = &s +} + +// TriggeredBy returns the value of the "triggered_by" field in the mutation. +func (m *BackupJobMutation) TriggeredBy() (r string, exists bool) { + v := m.triggered_by + if v == nil { + return + } + return *v, true +} + +// OldTriggeredBy returns the old "triggered_by" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldTriggeredBy(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTriggeredBy is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTriggeredBy requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTriggeredBy: %w", err) + } + return oldValue.TriggeredBy, nil +} + +// ResetTriggeredBy resets all changes to the "triggered_by" field. +func (m *BackupJobMutation) ResetTriggeredBy() { + m.triggered_by = nil +} + +// SetIdempotencyKey sets the "idempotency_key" field. +func (m *BackupJobMutation) SetIdempotencyKey(s string) { + m.idempotency_key = &s +} + +// IdempotencyKey returns the value of the "idempotency_key" field in the mutation. +func (m *BackupJobMutation) IdempotencyKey() (r string, exists bool) { + v := m.idempotency_key + if v == nil { + return + } + return *v, true +} + +// OldIdempotencyKey returns the old "idempotency_key" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobMutation) OldIdempotencyKey(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIdempotencyKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIdempotencyKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIdempotencyKey: %w", err) + } + return oldValue.IdempotencyKey, nil +} + +// ClearIdempotencyKey clears the value of the "idempotency_key" field. +func (m *BackupJobMutation) ClearIdempotencyKey() { + m.idempotency_key = nil + m.clearedFields[backupjob.FieldIdempotencyKey] = struct{}{} +} + +// IdempotencyKeyCleared returns if the "idempotency_key" field was cleared in this mutation. +func (m *BackupJobMutation) IdempotencyKeyCleared() bool { + _, ok := m.clearedFields[backupjob.FieldIdempotencyKey] + return ok +} + +// ResetIdempotencyKey resets all changes to the "idempotency_key" field. +func (m *BackupJobMutation) ResetIdempotencyKey() { + m.idempotency_key = nil + delete(m.clearedFields, backupjob.FieldIdempotencyKey) +} + +// SetUploadToS3 sets the "upload_to_s3" field. +func (m *BackupJobMutation) SetUploadToS3(b bool) { + m.upload_to_s3 = &b +} + +// UploadToS3 returns the value of the "upload_to_s3" field in the mutation. +func (m *BackupJobMutation) UploadToS3() (r bool, exists bool) { + v := m.upload_to_s3 + if v == nil { + return + } + return *v, true +} + +// OldUploadToS3 returns the old "upload_to_s3" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobMutation) OldUploadToS3(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUploadToS3 is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUploadToS3 requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUploadToS3: %w", err) + } + return oldValue.UploadToS3, nil +} + +// ResetUploadToS3 resets all changes to the "upload_to_s3" field. +func (m *BackupJobMutation) ResetUploadToS3() { + m.upload_to_s3 = nil +} + +// SetStartedAt sets the "started_at" field. +func (m *BackupJobMutation) SetStartedAt(t time.Time) { + m.started_at = &t +} + +// StartedAt returns the value of the "started_at" field in the mutation. +func (m *BackupJobMutation) StartedAt() (r time.Time, exists bool) { + v := m.started_at + if v == nil { + return + } + return *v, true +} + +// OldStartedAt returns the old "started_at" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldStartedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStartedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStartedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStartedAt: %w", err) + } + return oldValue.StartedAt, nil +} + +// ClearStartedAt clears the value of the "started_at" field. 
+func (m *BackupJobMutation) ClearStartedAt() { + m.started_at = nil + m.clearedFields[backupjob.FieldStartedAt] = struct{}{} +} + +// StartedAtCleared returns if the "started_at" field was cleared in this mutation. +func (m *BackupJobMutation) StartedAtCleared() bool { + _, ok := m.clearedFields[backupjob.FieldStartedAt] + return ok +} + +// ResetStartedAt resets all changes to the "started_at" field. +func (m *BackupJobMutation) ResetStartedAt() { + m.started_at = nil + delete(m.clearedFields, backupjob.FieldStartedAt) +} + +// SetFinishedAt sets the "finished_at" field. +func (m *BackupJobMutation) SetFinishedAt(t time.Time) { + m.finished_at = &t +} + +// FinishedAt returns the value of the "finished_at" field in the mutation. +func (m *BackupJobMutation) FinishedAt() (r time.Time, exists bool) { + v := m.finished_at + if v == nil { + return + } + return *v, true +} + +// OldFinishedAt returns the old "finished_at" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldFinishedAt(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFinishedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFinishedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFinishedAt: %w", err) + } + return oldValue.FinishedAt, nil +} + +// ClearFinishedAt clears the value of the "finished_at" field. +func (m *BackupJobMutation) ClearFinishedAt() { + m.finished_at = nil + m.clearedFields[backupjob.FieldFinishedAt] = struct{}{} +} + +// FinishedAtCleared returns if the "finished_at" field was cleared in this mutation. 
+func (m *BackupJobMutation) FinishedAtCleared() bool { + _, ok := m.clearedFields[backupjob.FieldFinishedAt] + return ok +} + +// ResetFinishedAt resets all changes to the "finished_at" field. +func (m *BackupJobMutation) ResetFinishedAt() { + m.finished_at = nil + delete(m.clearedFields, backupjob.FieldFinishedAt) +} + +// SetErrorMessage sets the "error_message" field. +func (m *BackupJobMutation) SetErrorMessage(s string) { + m.error_message = &s +} + +// ErrorMessage returns the value of the "error_message" field in the mutation. +func (m *BackupJobMutation) ErrorMessage() (r string, exists bool) { + v := m.error_message + if v == nil { + return + } + return *v, true +} + +// OldErrorMessage returns the old "error_message" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldErrorMessage(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldErrorMessage is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldErrorMessage requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldErrorMessage: %w", err) + } + return oldValue.ErrorMessage, nil +} + +// ClearErrorMessage clears the value of the "error_message" field. +func (m *BackupJobMutation) ClearErrorMessage() { + m.error_message = nil + m.clearedFields[backupjob.FieldErrorMessage] = struct{}{} +} + +// ErrorMessageCleared returns if the "error_message" field was cleared in this mutation. +func (m *BackupJobMutation) ErrorMessageCleared() bool { + _, ok := m.clearedFields[backupjob.FieldErrorMessage] + return ok +} + +// ResetErrorMessage resets all changes to the "error_message" field. 
+func (m *BackupJobMutation) ResetErrorMessage() { + m.error_message = nil + delete(m.clearedFields, backupjob.FieldErrorMessage) +} + +// SetArtifactLocalPath sets the "artifact_local_path" field. +func (m *BackupJobMutation) SetArtifactLocalPath(s string) { + m.artifact_local_path = &s +} + +// ArtifactLocalPath returns the value of the "artifact_local_path" field in the mutation. +func (m *BackupJobMutation) ArtifactLocalPath() (r string, exists bool) { + v := m.artifact_local_path + if v == nil { + return + } + return *v, true +} + +// OldArtifactLocalPath returns the old "artifact_local_path" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldArtifactLocalPath(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldArtifactLocalPath is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldArtifactLocalPath requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldArtifactLocalPath: %w", err) + } + return oldValue.ArtifactLocalPath, nil +} + +// ClearArtifactLocalPath clears the value of the "artifact_local_path" field. +func (m *BackupJobMutation) ClearArtifactLocalPath() { + m.artifact_local_path = nil + m.clearedFields[backupjob.FieldArtifactLocalPath] = struct{}{} +} + +// ArtifactLocalPathCleared returns if the "artifact_local_path" field was cleared in this mutation. +func (m *BackupJobMutation) ArtifactLocalPathCleared() bool { + _, ok := m.clearedFields[backupjob.FieldArtifactLocalPath] + return ok +} + +// ResetArtifactLocalPath resets all changes to the "artifact_local_path" field. 
+func (m *BackupJobMutation) ResetArtifactLocalPath() { + m.artifact_local_path = nil + delete(m.clearedFields, backupjob.FieldArtifactLocalPath) +} + +// SetArtifactSizeBytes sets the "artifact_size_bytes" field. +func (m *BackupJobMutation) SetArtifactSizeBytes(i int64) { + m.artifact_size_bytes = &i + m.addartifact_size_bytes = nil +} + +// ArtifactSizeBytes returns the value of the "artifact_size_bytes" field in the mutation. +func (m *BackupJobMutation) ArtifactSizeBytes() (r int64, exists bool) { + v := m.artifact_size_bytes + if v == nil { + return + } + return *v, true +} + +// OldArtifactSizeBytes returns the old "artifact_size_bytes" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldArtifactSizeBytes(ctx context.Context) (v *int64, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldArtifactSizeBytes is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldArtifactSizeBytes requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldArtifactSizeBytes: %w", err) + } + return oldValue.ArtifactSizeBytes, nil +} + +// AddArtifactSizeBytes adds i to the "artifact_size_bytes" field. +func (m *BackupJobMutation) AddArtifactSizeBytes(i int64) { + if m.addartifact_size_bytes != nil { + *m.addartifact_size_bytes += i + } else { + m.addartifact_size_bytes = &i + } +} + +// AddedArtifactSizeBytes returns the value that was added to the "artifact_size_bytes" field in this mutation. 
+func (m *BackupJobMutation) AddedArtifactSizeBytes() (r int64, exists bool) { + v := m.addartifact_size_bytes + if v == nil { + return + } + return *v, true +} + +// ClearArtifactSizeBytes clears the value of the "artifact_size_bytes" field. +func (m *BackupJobMutation) ClearArtifactSizeBytes() { + m.artifact_size_bytes = nil + m.addartifact_size_bytes = nil + m.clearedFields[backupjob.FieldArtifactSizeBytes] = struct{}{} +} + +// ArtifactSizeBytesCleared returns if the "artifact_size_bytes" field was cleared in this mutation. +func (m *BackupJobMutation) ArtifactSizeBytesCleared() bool { + _, ok := m.clearedFields[backupjob.FieldArtifactSizeBytes] + return ok +} + +// ResetArtifactSizeBytes resets all changes to the "artifact_size_bytes" field. +func (m *BackupJobMutation) ResetArtifactSizeBytes() { + m.artifact_size_bytes = nil + m.addartifact_size_bytes = nil + delete(m.clearedFields, backupjob.FieldArtifactSizeBytes) +} + +// SetArtifactSha256 sets the "artifact_sha256" field. +func (m *BackupJobMutation) SetArtifactSha256(s string) { + m.artifact_sha256 = &s +} + +// ArtifactSha256 returns the value of the "artifact_sha256" field in the mutation. +func (m *BackupJobMutation) ArtifactSha256() (r string, exists bool) { + v := m.artifact_sha256 + if v == nil { + return + } + return *v, true +} + +// OldArtifactSha256 returns the old "artifact_sha256" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobMutation) OldArtifactSha256(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldArtifactSha256 is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldArtifactSha256 requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldArtifactSha256: %w", err) + } + return oldValue.ArtifactSha256, nil +} + +// ClearArtifactSha256 clears the value of the "artifact_sha256" field. +func (m *BackupJobMutation) ClearArtifactSha256() { + m.artifact_sha256 = nil + m.clearedFields[backupjob.FieldArtifactSha256] = struct{}{} +} + +// ArtifactSha256Cleared returns if the "artifact_sha256" field was cleared in this mutation. +func (m *BackupJobMutation) ArtifactSha256Cleared() bool { + _, ok := m.clearedFields[backupjob.FieldArtifactSha256] + return ok +} + +// ResetArtifactSha256 resets all changes to the "artifact_sha256" field. +func (m *BackupJobMutation) ResetArtifactSha256() { + m.artifact_sha256 = nil + delete(m.clearedFields, backupjob.FieldArtifactSha256) +} + +// SetS3Bucket sets the "s3_bucket" field. +func (m *BackupJobMutation) SetS3Bucket(s string) { + m.s3_bucket = &s +} + +// S3Bucket returns the value of the "s3_bucket" field in the mutation. +func (m *BackupJobMutation) S3Bucket() (r string, exists bool) { + v := m.s3_bucket + if v == nil { + return + } + return *v, true +} + +// OldS3Bucket returns the old "s3_bucket" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobMutation) OldS3Bucket(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldS3Bucket is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldS3Bucket requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldS3Bucket: %w", err) + } + return oldValue.S3Bucket, nil +} + +// ClearS3Bucket clears the value of the "s3_bucket" field. +func (m *BackupJobMutation) ClearS3Bucket() { + m.s3_bucket = nil + m.clearedFields[backupjob.FieldS3Bucket] = struct{}{} +} + +// S3BucketCleared returns if the "s3_bucket" field was cleared in this mutation. +func (m *BackupJobMutation) S3BucketCleared() bool { + _, ok := m.clearedFields[backupjob.FieldS3Bucket] + return ok +} + +// ResetS3Bucket resets all changes to the "s3_bucket" field. +func (m *BackupJobMutation) ResetS3Bucket() { + m.s3_bucket = nil + delete(m.clearedFields, backupjob.FieldS3Bucket) +} + +// SetS3Key sets the "s3_key" field. +func (m *BackupJobMutation) SetS3Key(s string) { + m.s3_key = &s +} + +// S3Key returns the value of the "s3_key" field in the mutation. +func (m *BackupJobMutation) S3Key() (r string, exists bool) { + v := m.s3_key + if v == nil { + return + } + return *v, true +} + +// OldS3Key returns the old "s3_key" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobMutation) OldS3Key(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldS3Key is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldS3Key requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldS3Key: %w", err) + } + return oldValue.S3Key, nil +} + +// ClearS3Key clears the value of the "s3_key" field. +func (m *BackupJobMutation) ClearS3Key() { + m.s3_key = nil + m.clearedFields[backupjob.FieldS3Key] = struct{}{} +} + +// S3KeyCleared returns if the "s3_key" field was cleared in this mutation. +func (m *BackupJobMutation) S3KeyCleared() bool { + _, ok := m.clearedFields[backupjob.FieldS3Key] + return ok +} + +// ResetS3Key resets all changes to the "s3_key" field. +func (m *BackupJobMutation) ResetS3Key() { + m.s3_key = nil + delete(m.clearedFields, backupjob.FieldS3Key) +} + +// SetS3Etag sets the "s3_etag" field. +func (m *BackupJobMutation) SetS3Etag(s string) { + m.s3_etag = &s +} + +// S3Etag returns the value of the "s3_etag" field in the mutation. +func (m *BackupJobMutation) S3Etag() (r string, exists bool) { + v := m.s3_etag + if v == nil { + return + } + return *v, true +} + +// OldS3Etag returns the old "s3_etag" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobMutation) OldS3Etag(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldS3Etag is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldS3Etag requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldS3Etag: %w", err) + } + return oldValue.S3Etag, nil +} + +// ClearS3Etag clears the value of the "s3_etag" field. +func (m *BackupJobMutation) ClearS3Etag() { + m.s3_etag = nil + m.clearedFields[backupjob.FieldS3Etag] = struct{}{} +} + +// S3EtagCleared returns if the "s3_etag" field was cleared in this mutation. +func (m *BackupJobMutation) S3EtagCleared() bool { + _, ok := m.clearedFields[backupjob.FieldS3Etag] + return ok +} + +// ResetS3Etag resets all changes to the "s3_etag" field. +func (m *BackupJobMutation) ResetS3Etag() { + m.s3_etag = nil + delete(m.clearedFields, backupjob.FieldS3Etag) +} + +// SetCreatedAt sets the "created_at" field. +func (m *BackupJobMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *BackupJobMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *BackupJobMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *BackupJobMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *BackupJobMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *BackupJobMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// AddEventIDs adds the "events" edge to the BackupJobEvent entity by ids. 
+func (m *BackupJobMutation) AddEventIDs(ids ...int) { + if m.events == nil { + m.events = make(map[int]struct{}) + } + for i := range ids { + m.events[ids[i]] = struct{}{} + } +} + +// ClearEvents clears the "events" edge to the BackupJobEvent entity. +func (m *BackupJobMutation) ClearEvents() { + m.clearedevents = true +} + +// EventsCleared reports if the "events" edge to the BackupJobEvent entity was cleared. +func (m *BackupJobMutation) EventsCleared() bool { + return m.clearedevents +} + +// RemoveEventIDs removes the "events" edge to the BackupJobEvent entity by IDs. +func (m *BackupJobMutation) RemoveEventIDs(ids ...int) { + if m.removedevents == nil { + m.removedevents = make(map[int]struct{}) + } + for i := range ids { + delete(m.events, ids[i]) + m.removedevents[ids[i]] = struct{}{} + } +} + +// RemovedEvents returns the removed IDs of the "events" edge to the BackupJobEvent entity. +func (m *BackupJobMutation) RemovedEventsIDs() (ids []int) { + for id := range m.removedevents { + ids = append(ids, id) + } + return +} + +// EventsIDs returns the "events" edge IDs in the mutation. +func (m *BackupJobMutation) EventsIDs() (ids []int) { + for id := range m.events { + ids = append(ids, id) + } + return +} + +// ResetEvents resets all changes to the "events" edge. +func (m *BackupJobMutation) ResetEvents() { + m.events = nil + m.clearedevents = false + m.removedevents = nil +} + +// Where appends a list predicates to the BackupJobMutation builder. +func (m *BackupJobMutation) Where(ps ...predicate.BackupJob) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the BackupJobMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *BackupJobMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.BackupJob, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. 
+func (m *BackupJobMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *BackupJobMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (BackupJob). +func (m *BackupJobMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *BackupJobMutation) Fields() []string { + fields := make([]string, 0, 17) + if m.job_id != nil { + fields = append(fields, backupjob.FieldJobID) + } + if m.backup_type != nil { + fields = append(fields, backupjob.FieldBackupType) + } + if m.status != nil { + fields = append(fields, backupjob.FieldStatus) + } + if m.triggered_by != nil { + fields = append(fields, backupjob.FieldTriggeredBy) + } + if m.idempotency_key != nil { + fields = append(fields, backupjob.FieldIdempotencyKey) + } + if m.upload_to_s3 != nil { + fields = append(fields, backupjob.FieldUploadToS3) + } + if m.started_at != nil { + fields = append(fields, backupjob.FieldStartedAt) + } + if m.finished_at != nil { + fields = append(fields, backupjob.FieldFinishedAt) + } + if m.error_message != nil { + fields = append(fields, backupjob.FieldErrorMessage) + } + if m.artifact_local_path != nil { + fields = append(fields, backupjob.FieldArtifactLocalPath) + } + if m.artifact_size_bytes != nil { + fields = append(fields, backupjob.FieldArtifactSizeBytes) + } + if m.artifact_sha256 != nil { + fields = append(fields, backupjob.FieldArtifactSha256) + } + if m.s3_bucket != nil { + fields = append(fields, backupjob.FieldS3Bucket) + } + if m.s3_key != nil { + fields = append(fields, backupjob.FieldS3Key) + } + if m.s3_etag != nil { + fields = append(fields, backupjob.FieldS3Etag) + } + if m.created_at != nil { + fields = append(fields, backupjob.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, 
backupjob.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *BackupJobMutation) Field(name string) (ent.Value, bool) { + switch name { + case backupjob.FieldJobID: + return m.JobID() + case backupjob.FieldBackupType: + return m.BackupType() + case backupjob.FieldStatus: + return m.Status() + case backupjob.FieldTriggeredBy: + return m.TriggeredBy() + case backupjob.FieldIdempotencyKey: + return m.IdempotencyKey() + case backupjob.FieldUploadToS3: + return m.UploadToS3() + case backupjob.FieldStartedAt: + return m.StartedAt() + case backupjob.FieldFinishedAt: + return m.FinishedAt() + case backupjob.FieldErrorMessage: + return m.ErrorMessage() + case backupjob.FieldArtifactLocalPath: + return m.ArtifactLocalPath() + case backupjob.FieldArtifactSizeBytes: + return m.ArtifactSizeBytes() + case backupjob.FieldArtifactSha256: + return m.ArtifactSha256() + case backupjob.FieldS3Bucket: + return m.S3Bucket() + case backupjob.FieldS3Key: + return m.S3Key() + case backupjob.FieldS3Etag: + return m.S3Etag() + case backupjob.FieldCreatedAt: + return m.CreatedAt() + case backupjob.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *BackupJobMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case backupjob.FieldJobID: + return m.OldJobID(ctx) + case backupjob.FieldBackupType: + return m.OldBackupType(ctx) + case backupjob.FieldStatus: + return m.OldStatus(ctx) + case backupjob.FieldTriggeredBy: + return m.OldTriggeredBy(ctx) + case backupjob.FieldIdempotencyKey: + return m.OldIdempotencyKey(ctx) + case backupjob.FieldUploadToS3: + return m.OldUploadToS3(ctx) + case backupjob.FieldStartedAt: + return m.OldStartedAt(ctx) + case backupjob.FieldFinishedAt: + return m.OldFinishedAt(ctx) + case backupjob.FieldErrorMessage: + return m.OldErrorMessage(ctx) + case backupjob.FieldArtifactLocalPath: + return m.OldArtifactLocalPath(ctx) + case backupjob.FieldArtifactSizeBytes: + return m.OldArtifactSizeBytes(ctx) + case backupjob.FieldArtifactSha256: + return m.OldArtifactSha256(ctx) + case backupjob.FieldS3Bucket: + return m.OldS3Bucket(ctx) + case backupjob.FieldS3Key: + return m.OldS3Key(ctx) + case backupjob.FieldS3Etag: + return m.OldS3Etag(ctx) + case backupjob.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case backupjob.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown BackupJob field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *BackupJobMutation) SetField(name string, value ent.Value) error { + switch name { + case backupjob.FieldJobID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetJobID(v) + return nil + case backupjob.FieldBackupType: + v, ok := value.(backupjob.BackupType) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBackupType(v) + return nil + case backupjob.FieldStatus: + v, ok := value.(backupjob.Status) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case backupjob.FieldTriggeredBy: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTriggeredBy(v) + return nil + case backupjob.FieldIdempotencyKey: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIdempotencyKey(v) + return nil + case backupjob.FieldUploadToS3: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUploadToS3(v) + return nil + case backupjob.FieldStartedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStartedAt(v) + return nil + case backupjob.FieldFinishedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFinishedAt(v) + return nil + case backupjob.FieldErrorMessage: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetErrorMessage(v) + return nil + case backupjob.FieldArtifactLocalPath: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetArtifactLocalPath(v) + return nil + case backupjob.FieldArtifactSizeBytes: + v, ok := value.(int64) + if !ok { + return 
fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetArtifactSizeBytes(v) + return nil + case backupjob.FieldArtifactSha256: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetArtifactSha256(v) + return nil + case backupjob.FieldS3Bucket: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetS3Bucket(v) + return nil + case backupjob.FieldS3Key: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetS3Key(v) + return nil + case backupjob.FieldS3Etag: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetS3Etag(v) + return nil + case backupjob.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case backupjob.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown BackupJob field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *BackupJobMutation) AddedFields() []string { + var fields []string + if m.addartifact_size_bytes != nil { + fields = append(fields, backupjob.FieldArtifactSizeBytes) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *BackupJobMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case backupjob.FieldArtifactSizeBytes: + return m.AddedArtifactSizeBytes() + } + return nil, false +} + +// AddField adds the value to the field with the given name. 
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *BackupJobMutation) AddField(name string, value ent.Value) error { + switch name { + case backupjob.FieldArtifactSizeBytes: + v, ok := value.(int64) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddArtifactSizeBytes(v) + return nil + } + return fmt.Errorf("unknown BackupJob numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *BackupJobMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(backupjob.FieldIdempotencyKey) { + fields = append(fields, backupjob.FieldIdempotencyKey) + } + if m.FieldCleared(backupjob.FieldStartedAt) { + fields = append(fields, backupjob.FieldStartedAt) + } + if m.FieldCleared(backupjob.FieldFinishedAt) { + fields = append(fields, backupjob.FieldFinishedAt) + } + if m.FieldCleared(backupjob.FieldErrorMessage) { + fields = append(fields, backupjob.FieldErrorMessage) + } + if m.FieldCleared(backupjob.FieldArtifactLocalPath) { + fields = append(fields, backupjob.FieldArtifactLocalPath) + } + if m.FieldCleared(backupjob.FieldArtifactSizeBytes) { + fields = append(fields, backupjob.FieldArtifactSizeBytes) + } + if m.FieldCleared(backupjob.FieldArtifactSha256) { + fields = append(fields, backupjob.FieldArtifactSha256) + } + if m.FieldCleared(backupjob.FieldS3Bucket) { + fields = append(fields, backupjob.FieldS3Bucket) + } + if m.FieldCleared(backupjob.FieldS3Key) { + fields = append(fields, backupjob.FieldS3Key) + } + if m.FieldCleared(backupjob.FieldS3Etag) { + fields = append(fields, backupjob.FieldS3Etag) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. 
+func (m *BackupJobMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *BackupJobMutation) ClearField(name string) error { + switch name { + case backupjob.FieldIdempotencyKey: + m.ClearIdempotencyKey() + return nil + case backupjob.FieldStartedAt: + m.ClearStartedAt() + return nil + case backupjob.FieldFinishedAt: + m.ClearFinishedAt() + return nil + case backupjob.FieldErrorMessage: + m.ClearErrorMessage() + return nil + case backupjob.FieldArtifactLocalPath: + m.ClearArtifactLocalPath() + return nil + case backupjob.FieldArtifactSizeBytes: + m.ClearArtifactSizeBytes() + return nil + case backupjob.FieldArtifactSha256: + m.ClearArtifactSha256() + return nil + case backupjob.FieldS3Bucket: + m.ClearS3Bucket() + return nil + case backupjob.FieldS3Key: + m.ClearS3Key() + return nil + case backupjob.FieldS3Etag: + m.ClearS3Etag() + return nil + } + return fmt.Errorf("unknown BackupJob nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *BackupJobMutation) ResetField(name string) error { + switch name { + case backupjob.FieldJobID: + m.ResetJobID() + return nil + case backupjob.FieldBackupType: + m.ResetBackupType() + return nil + case backupjob.FieldStatus: + m.ResetStatus() + return nil + case backupjob.FieldTriggeredBy: + m.ResetTriggeredBy() + return nil + case backupjob.FieldIdempotencyKey: + m.ResetIdempotencyKey() + return nil + case backupjob.FieldUploadToS3: + m.ResetUploadToS3() + return nil + case backupjob.FieldStartedAt: + m.ResetStartedAt() + return nil + case backupjob.FieldFinishedAt: + m.ResetFinishedAt() + return nil + case backupjob.FieldErrorMessage: + m.ResetErrorMessage() + return nil + case backupjob.FieldArtifactLocalPath: + m.ResetArtifactLocalPath() + return nil + case backupjob.FieldArtifactSizeBytes: + m.ResetArtifactSizeBytes() + return nil + case backupjob.FieldArtifactSha256: + m.ResetArtifactSha256() + return nil + case backupjob.FieldS3Bucket: + m.ResetS3Bucket() + return nil + case backupjob.FieldS3Key: + m.ResetS3Key() + return nil + case backupjob.FieldS3Etag: + m.ResetS3Etag() + return nil + case backupjob.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case backupjob.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown BackupJob field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *BackupJobMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.events != nil { + edges = append(edges, backupjob.EdgeEvents) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *BackupJobMutation) AddedIDs(name string) []ent.Value { + switch name { + case backupjob.EdgeEvents: + ids := make([]ent.Value, 0, len(m.events)) + for id := range m.events { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *BackupJobMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + if m.removedevents != nil { + edges = append(edges, backupjob.EdgeEvents) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *BackupJobMutation) RemovedIDs(name string) []ent.Value { + switch name { + case backupjob.EdgeEvents: + ids := make([]ent.Value, 0, len(m.removedevents)) + for id := range m.removedevents { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *BackupJobMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedevents { + edges = append(edges, backupjob.EdgeEvents) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *BackupJobMutation) EdgeCleared(name string) bool { + switch name { + case backupjob.EdgeEvents: + return m.clearedevents + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *BackupJobMutation) ClearEdge(name string) error { + switch name { + } + return fmt.Errorf("unknown BackupJob unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
+func (m *BackupJobMutation) ResetEdge(name string) error { + switch name { + case backupjob.EdgeEvents: + m.ResetEvents() + return nil + } + return fmt.Errorf("unknown BackupJob edge %s", name) +} + +// BackupJobEventMutation represents an operation that mutates the BackupJobEvent nodes in the graph. +type BackupJobEventMutation struct { + config + op Op + typ string + id *int + level *backupjobevent.Level + event_type *string + message *string + payload *string + event_time *time.Time + created_at *time.Time + clearedFields map[string]struct{} + job *int + clearedjob bool + done bool + oldValue func(context.Context) (*BackupJobEvent, error) + predicates []predicate.BackupJobEvent +} + +var _ ent.Mutation = (*BackupJobEventMutation)(nil) + +// backupjobeventOption allows management of the mutation configuration using functional options. +type backupjobeventOption func(*BackupJobEventMutation) + +// newBackupJobEventMutation creates new mutation for the BackupJobEvent entity. +func newBackupJobEventMutation(c config, op Op, opts ...backupjobeventOption) *BackupJobEventMutation { + m := &BackupJobEventMutation{ + config: c, + op: op, + typ: TypeBackupJobEvent, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withBackupJobEventID sets the ID field of the mutation. +func withBackupJobEventID(id int) backupjobeventOption { + return func(m *BackupJobEventMutation) { + var ( + err error + once sync.Once + value *BackupJobEvent + ) + m.oldValue = func(ctx context.Context) (*BackupJobEvent, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().BackupJobEvent.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withBackupJobEvent sets the old BackupJobEvent of the mutation. 
+func withBackupJobEvent(node *BackupJobEvent) backupjobeventOption { + return func(m *BackupJobEventMutation) { + m.oldValue = func(context.Context) (*BackupJobEvent, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m BackupJobEventMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m BackupJobEventMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *BackupJobEventMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *BackupJobEventMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().BackupJobEvent.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetBackupJobID sets the "backup_job_id" field. 
+func (m *BackupJobEventMutation) SetBackupJobID(i int) { + m.job = &i +} + +// BackupJobID returns the value of the "backup_job_id" field in the mutation. +func (m *BackupJobEventMutation) BackupJobID() (r int, exists bool) { + v := m.job + if v == nil { + return + } + return *v, true +} + +// OldBackupJobID returns the old "backup_job_id" field's value of the BackupJobEvent entity. +// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobEventMutation) OldBackupJobID(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBackupJobID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBackupJobID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBackupJobID: %w", err) + } + return oldValue.BackupJobID, nil +} + +// ResetBackupJobID resets all changes to the "backup_job_id" field. +func (m *BackupJobEventMutation) ResetBackupJobID() { + m.job = nil +} + +// SetLevel sets the "level" field. +func (m *BackupJobEventMutation) SetLevel(b backupjobevent.Level) { + m.level = &b +} + +// Level returns the value of the "level" field in the mutation. +func (m *BackupJobEventMutation) Level() (r backupjobevent.Level, exists bool) { + v := m.level + if v == nil { + return + } + return *v, true +} + +// OldLevel returns the old "level" field's value of the BackupJobEvent entity. +// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobEventMutation) OldLevel(ctx context.Context) (v backupjobevent.Level, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLevel is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLevel requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLevel: %w", err) + } + return oldValue.Level, nil +} + +// ResetLevel resets all changes to the "level" field. +func (m *BackupJobEventMutation) ResetLevel() { + m.level = nil +} + +// SetEventType sets the "event_type" field. +func (m *BackupJobEventMutation) SetEventType(s string) { + m.event_type = &s +} + +// EventType returns the value of the "event_type" field in the mutation. +func (m *BackupJobEventMutation) EventType() (r string, exists bool) { + v := m.event_type + if v == nil { + return + } + return *v, true +} + +// OldEventType returns the old "event_type" field's value of the BackupJobEvent entity. +// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobEventMutation) OldEventType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEventType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEventType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEventType: %w", err) + } + return oldValue.EventType, nil +} + +// ResetEventType resets all changes to the "event_type" field. +func (m *BackupJobEventMutation) ResetEventType() { + m.event_type = nil +} + +// SetMessage sets the "message" field. 
+func (m *BackupJobEventMutation) SetMessage(s string) { + m.message = &s +} + +// Message returns the value of the "message" field in the mutation. +func (m *BackupJobEventMutation) Message() (r string, exists bool) { + v := m.message + if v == nil { + return + } + return *v, true +} + +// OldMessage returns the old "message" field's value of the BackupJobEvent entity. +// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobEventMutation) OldMessage(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMessage is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMessage requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMessage: %w", err) + } + return oldValue.Message, nil +} + +// ResetMessage resets all changes to the "message" field. +func (m *BackupJobEventMutation) ResetMessage() { + m.message = nil +} + +// SetPayload sets the "payload" field. +func (m *BackupJobEventMutation) SetPayload(s string) { + m.payload = &s +} + +// Payload returns the value of the "payload" field in the mutation. +func (m *BackupJobEventMutation) Payload() (r string, exists bool) { + v := m.payload + if v == nil { + return + } + return *v, true +} + +// OldPayload returns the old "payload" field's value of the BackupJobEvent entity. +// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobEventMutation) OldPayload(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPayload is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPayload requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPayload: %w", err) + } + return oldValue.Payload, nil +} + +// ClearPayload clears the value of the "payload" field. +func (m *BackupJobEventMutation) ClearPayload() { + m.payload = nil + m.clearedFields[backupjobevent.FieldPayload] = struct{}{} +} + +// PayloadCleared returns if the "payload" field was cleared in this mutation. +func (m *BackupJobEventMutation) PayloadCleared() bool { + _, ok := m.clearedFields[backupjobevent.FieldPayload] + return ok +} + +// ResetPayload resets all changes to the "payload" field. +func (m *BackupJobEventMutation) ResetPayload() { + m.payload = nil + delete(m.clearedFields, backupjobevent.FieldPayload) +} + +// SetEventTime sets the "event_time" field. +func (m *BackupJobEventMutation) SetEventTime(t time.Time) { + m.event_time = &t +} + +// EventTime returns the value of the "event_time" field in the mutation. +func (m *BackupJobEventMutation) EventTime() (r time.Time, exists bool) { + v := m.event_time + if v == nil { + return + } + return *v, true +} + +// OldEventTime returns the old "event_time" field's value of the BackupJobEvent entity. +// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupJobEventMutation) OldEventTime(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEventTime is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEventTime requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEventTime: %w", err) + } + return oldValue.EventTime, nil +} + +// ResetEventTime resets all changes to the "event_time" field. +func (m *BackupJobEventMutation) ResetEventTime() { + m.event_time = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *BackupJobEventMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *BackupJobEventMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the BackupJobEvent entity. +// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobEventMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *BackupJobEventMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetJobID sets the "job" edge to the BackupJob entity by id. 
+func (m *BackupJobEventMutation) SetJobID(id int) { + m.job = &id +} + +// ClearJob clears the "job" edge to the BackupJob entity. +func (m *BackupJobEventMutation) ClearJob() { + m.clearedjob = true + m.clearedFields[backupjobevent.FieldBackupJobID] = struct{}{} +} + +// JobCleared reports if the "job" edge to the BackupJob entity was cleared. +func (m *BackupJobEventMutation) JobCleared() bool { + return m.clearedjob +} + +// JobID returns the "job" edge ID in the mutation. +func (m *BackupJobEventMutation) JobID() (id int, exists bool) { + if m.job != nil { + return *m.job, true + } + return +} + +// JobIDs returns the "job" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// JobID instead. It exists only for internal usage by the builders. +func (m *BackupJobEventMutation) JobIDs() (ids []int) { + if id := m.job; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetJob resets all changes to the "job" edge. +func (m *BackupJobEventMutation) ResetJob() { + m.job = nil + m.clearedjob = false +} + +// Where appends a list predicates to the BackupJobEventMutation builder. +func (m *BackupJobEventMutation) Where(ps ...predicate.BackupJobEvent) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the BackupJobEventMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *BackupJobEventMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.BackupJobEvent, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *BackupJobEventMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *BackupJobEventMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (BackupJobEvent). 
+func (m *BackupJobEventMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *BackupJobEventMutation) Fields() []string { + fields := make([]string, 0, 7) + if m.job != nil { + fields = append(fields, backupjobevent.FieldBackupJobID) + } + if m.level != nil { + fields = append(fields, backupjobevent.FieldLevel) + } + if m.event_type != nil { + fields = append(fields, backupjobevent.FieldEventType) + } + if m.message != nil { + fields = append(fields, backupjobevent.FieldMessage) + } + if m.payload != nil { + fields = append(fields, backupjobevent.FieldPayload) + } + if m.event_time != nil { + fields = append(fields, backupjobevent.FieldEventTime) + } + if m.created_at != nil { + fields = append(fields, backupjobevent.FieldCreatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *BackupJobEventMutation) Field(name string) (ent.Value, bool) { + switch name { + case backupjobevent.FieldBackupJobID: + return m.BackupJobID() + case backupjobevent.FieldLevel: + return m.Level() + case backupjobevent.FieldEventType: + return m.EventType() + case backupjobevent.FieldMessage: + return m.Message() + case backupjobevent.FieldPayload: + return m.Payload() + case backupjobevent.FieldEventTime: + return m.EventTime() + case backupjobevent.FieldCreatedAt: + return m.CreatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *BackupJobEventMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case backupjobevent.FieldBackupJobID: + return m.OldBackupJobID(ctx) + case backupjobevent.FieldLevel: + return m.OldLevel(ctx) + case backupjobevent.FieldEventType: + return m.OldEventType(ctx) + case backupjobevent.FieldMessage: + return m.OldMessage(ctx) + case backupjobevent.FieldPayload: + return m.OldPayload(ctx) + case backupjobevent.FieldEventTime: + return m.OldEventTime(ctx) + case backupjobevent.FieldCreatedAt: + return m.OldCreatedAt(ctx) + } + return nil, fmt.Errorf("unknown BackupJobEvent field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *BackupJobEventMutation) SetField(name string, value ent.Value) error { + switch name { + case backupjobevent.FieldBackupJobID: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBackupJobID(v) + return nil + case backupjobevent.FieldLevel: + v, ok := value.(backupjobevent.Level) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLevel(v) + return nil + case backupjobevent.FieldEventType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEventType(v) + return nil + case backupjobevent.FieldMessage: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMessage(v) + return nil + case backupjobevent.FieldPayload: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPayload(v) + return nil + case backupjobevent.FieldEventTime: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEventTime(v) + return nil + 
case backupjobevent.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + } + return fmt.Errorf("unknown BackupJobEvent field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *BackupJobEventMutation) AddedFields() []string { + var fields []string + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *BackupJobEventMutation) AddedField(name string) (ent.Value, bool) { + switch name { + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *BackupJobEventMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown BackupJobEvent numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *BackupJobEventMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(backupjobevent.FieldPayload) { + fields = append(fields, backupjobevent.FieldPayload) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *BackupJobEventMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *BackupJobEventMutation) ClearField(name string) error { + switch name { + case backupjobevent.FieldPayload: + m.ClearPayload() + return nil + } + return fmt.Errorf("unknown BackupJobEvent nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *BackupJobEventMutation) ResetField(name string) error { + switch name { + case backupjobevent.FieldBackupJobID: + m.ResetBackupJobID() + return nil + case backupjobevent.FieldLevel: + m.ResetLevel() + return nil + case backupjobevent.FieldEventType: + m.ResetEventType() + return nil + case backupjobevent.FieldMessage: + m.ResetMessage() + return nil + case backupjobevent.FieldPayload: + m.ResetPayload() + return nil + case backupjobevent.FieldEventTime: + m.ResetEventTime() + return nil + case backupjobevent.FieldCreatedAt: + m.ResetCreatedAt() + return nil + } + return fmt.Errorf("unknown BackupJobEvent field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *BackupJobEventMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.job != nil { + edges = append(edges, backupjobevent.EdgeJob) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *BackupJobEventMutation) AddedIDs(name string) []ent.Value { + switch name { + case backupjobevent.EdgeJob: + if id := m.job; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *BackupJobEventMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. 
+func (m *BackupJobEventMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *BackupJobEventMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedjob { + edges = append(edges, backupjobevent.EdgeJob) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *BackupJobEventMutation) EdgeCleared(name string) bool { + switch name { + case backupjobevent.EdgeJob: + return m.clearedjob + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *BackupJobEventMutation) ClearEdge(name string) error { + switch name { + case backupjobevent.EdgeJob: + m.ClearJob() + return nil + } + return fmt.Errorf("unknown BackupJobEvent unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *BackupJobEventMutation) ResetEdge(name string) error { + switch name { + case backupjobevent.EdgeJob: + m.ResetJob() + return nil + } + return fmt.Errorf("unknown BackupJobEvent edge %s", name) +} + +// BackupS3ConfigMutation represents an operation that mutates the BackupS3Config nodes in the graph. 
+type BackupS3ConfigMutation struct { + config + op Op + typ string + id *int + enabled *bool + endpoint *string + region *string + bucket *string + access_key_id *string + secret_access_key_encrypted *string + prefix *string + force_path_style *bool + use_ssl *bool + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*BackupS3Config, error) + predicates []predicate.BackupS3Config +} + +var _ ent.Mutation = (*BackupS3ConfigMutation)(nil) + +// backups3configOption allows management of the mutation configuration using functional options. +type backups3configOption func(*BackupS3ConfigMutation) + +// newBackupS3ConfigMutation creates new mutation for the BackupS3Config entity. +func newBackupS3ConfigMutation(c config, op Op, opts ...backups3configOption) *BackupS3ConfigMutation { + m := &BackupS3ConfigMutation{ + config: c, + op: op, + typ: TypeBackupS3Config, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withBackupS3ConfigID sets the ID field of the mutation. +func withBackupS3ConfigID(id int) backups3configOption { + return func(m *BackupS3ConfigMutation) { + var ( + err error + once sync.Once + value *BackupS3Config + ) + m.oldValue = func(ctx context.Context) (*BackupS3Config, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().BackupS3Config.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withBackupS3Config sets the old BackupS3Config of the mutation. +func withBackupS3Config(node *BackupS3Config) backups3configOption { + return func(m *BackupS3ConfigMutation) { + m.oldValue = func(context.Context) (*BackupS3Config, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. 
If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m BackupS3ConfigMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m BackupS3ConfigMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *BackupS3ConfigMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *BackupS3ConfigMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().BackupS3Config.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetEnabled sets the "enabled" field. +func (m *BackupS3ConfigMutation) SetEnabled(b bool) { + m.enabled = &b +} + +// Enabled returns the value of the "enabled" field in the mutation. +func (m *BackupS3ConfigMutation) Enabled() (r bool, exists bool) { + v := m.enabled + if v == nil { + return + } + return *v, true +} + +// OldEnabled returns the old "enabled" field's value of the BackupS3Config entity. 
+// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldEnabled(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEnabled is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEnabled requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEnabled: %w", err) + } + return oldValue.Enabled, nil +} + +// ResetEnabled resets all changes to the "enabled" field. +func (m *BackupS3ConfigMutation) ResetEnabled() { + m.enabled = nil +} + +// SetEndpoint sets the "endpoint" field. +func (m *BackupS3ConfigMutation) SetEndpoint(s string) { + m.endpoint = &s +} + +// Endpoint returns the value of the "endpoint" field in the mutation. +func (m *BackupS3ConfigMutation) Endpoint() (r string, exists bool) { + v := m.endpoint + if v == nil { + return + } + return *v, true +} + +// OldEndpoint returns the old "endpoint" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldEndpoint(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldEndpoint is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldEndpoint requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldEndpoint: %w", err) + } + return oldValue.Endpoint, nil +} + +// ResetEndpoint resets all changes to the "endpoint" field. 
+func (m *BackupS3ConfigMutation) ResetEndpoint() { + m.endpoint = nil +} + +// SetRegion sets the "region" field. +func (m *BackupS3ConfigMutation) SetRegion(s string) { + m.region = &s +} + +// Region returns the value of the "region" field in the mutation. +func (m *BackupS3ConfigMutation) Region() (r string, exists bool) { + v := m.region + if v == nil { + return + } + return *v, true +} + +// OldRegion returns the old "region" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldRegion(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRegion is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRegion requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRegion: %w", err) + } + return oldValue.Region, nil +} + +// ResetRegion resets all changes to the "region" field. +func (m *BackupS3ConfigMutation) ResetRegion() { + m.region = nil +} + +// SetBucket sets the "bucket" field. +func (m *BackupS3ConfigMutation) SetBucket(s string) { + m.bucket = &s +} + +// Bucket returns the value of the "bucket" field in the mutation. +func (m *BackupS3ConfigMutation) Bucket() (r string, exists bool) { + v := m.bucket + if v == nil { + return + } + return *v, true +} + +// OldBucket returns the old "bucket" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupS3ConfigMutation) OldBucket(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBucket is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBucket requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBucket: %w", err) + } + return oldValue.Bucket, nil +} + +// ResetBucket resets all changes to the "bucket" field. +func (m *BackupS3ConfigMutation) ResetBucket() { + m.bucket = nil +} + +// SetAccessKeyID sets the "access_key_id" field. +func (m *BackupS3ConfigMutation) SetAccessKeyID(s string) { + m.access_key_id = &s +} + +// AccessKeyID returns the value of the "access_key_id" field in the mutation. +func (m *BackupS3ConfigMutation) AccessKeyID() (r string, exists bool) { + v := m.access_key_id + if v == nil { + return + } + return *v, true +} + +// OldAccessKeyID returns the old "access_key_id" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldAccessKeyID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAccessKeyID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAccessKeyID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAccessKeyID: %w", err) + } + return oldValue.AccessKeyID, nil +} + +// ResetAccessKeyID resets all changes to the "access_key_id" field. 
+func (m *BackupS3ConfigMutation) ResetAccessKeyID() { + m.access_key_id = nil +} + +// SetSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field. +func (m *BackupS3ConfigMutation) SetSecretAccessKeyEncrypted(s string) { + m.secret_access_key_encrypted = &s +} + +// SecretAccessKeyEncrypted returns the value of the "secret_access_key_encrypted" field in the mutation. +func (m *BackupS3ConfigMutation) SecretAccessKeyEncrypted() (r string, exists bool) { + v := m.secret_access_key_encrypted + if v == nil { + return + } + return *v, true +} + +// OldSecretAccessKeyEncrypted returns the old "secret_access_key_encrypted" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldSecretAccessKeyEncrypted(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSecretAccessKeyEncrypted is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSecretAccessKeyEncrypted requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSecretAccessKeyEncrypted: %w", err) + } + return oldValue.SecretAccessKeyEncrypted, nil +} + +// ClearSecretAccessKeyEncrypted clears the value of the "secret_access_key_encrypted" field. +func (m *BackupS3ConfigMutation) ClearSecretAccessKeyEncrypted() { + m.secret_access_key_encrypted = nil + m.clearedFields[backups3config.FieldSecretAccessKeyEncrypted] = struct{}{} +} + +// SecretAccessKeyEncryptedCleared returns if the "secret_access_key_encrypted" field was cleared in this mutation. 
+func (m *BackupS3ConfigMutation) SecretAccessKeyEncryptedCleared() bool { + _, ok := m.clearedFields[backups3config.FieldSecretAccessKeyEncrypted] + return ok +} + +// ResetSecretAccessKeyEncrypted resets all changes to the "secret_access_key_encrypted" field. +func (m *BackupS3ConfigMutation) ResetSecretAccessKeyEncrypted() { + m.secret_access_key_encrypted = nil + delete(m.clearedFields, backups3config.FieldSecretAccessKeyEncrypted) +} + +// SetPrefix sets the "prefix" field. +func (m *BackupS3ConfigMutation) SetPrefix(s string) { + m.prefix = &s +} + +// Prefix returns the value of the "prefix" field in the mutation. +func (m *BackupS3ConfigMutation) Prefix() (r string, exists bool) { + v := m.prefix + if v == nil { + return + } + return *v, true +} + +// OldPrefix returns the old "prefix" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldPrefix(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPrefix is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPrefix requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPrefix: %w", err) + } + return oldValue.Prefix, nil +} + +// ResetPrefix resets all changes to the "prefix" field. +func (m *BackupS3ConfigMutation) ResetPrefix() { + m.prefix = nil +} + +// SetForcePathStyle sets the "force_path_style" field. +func (m *BackupS3ConfigMutation) SetForcePathStyle(b bool) { + m.force_path_style = &b +} + +// ForcePathStyle returns the value of the "force_path_style" field in the mutation. 
+func (m *BackupS3ConfigMutation) ForcePathStyle() (r bool, exists bool) { + v := m.force_path_style + if v == nil { + return + } + return *v, true +} + +// OldForcePathStyle returns the old "force_path_style" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldForcePathStyle(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldForcePathStyle is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldForcePathStyle requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldForcePathStyle: %w", err) + } + return oldValue.ForcePathStyle, nil +} + +// ResetForcePathStyle resets all changes to the "force_path_style" field. +func (m *BackupS3ConfigMutation) ResetForcePathStyle() { + m.force_path_style = nil +} + +// SetUseSsl sets the "use_ssl" field. +func (m *BackupS3ConfigMutation) SetUseSsl(b bool) { + m.use_ssl = &b +} + +// UseSsl returns the value of the "use_ssl" field in the mutation. +func (m *BackupS3ConfigMutation) UseSsl() (r bool, exists bool) { + v := m.use_ssl + if v == nil { + return + } + return *v, true +} + +// OldUseSsl returns the old "use_ssl" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupS3ConfigMutation) OldUseSsl(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUseSsl is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUseSsl requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUseSsl: %w", err) + } + return oldValue.UseSsl, nil +} + +// ResetUseSsl resets all changes to the "use_ssl" field. +func (m *BackupS3ConfigMutation) ResetUseSsl() { + m.use_ssl = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *BackupS3ConfigMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *BackupS3ConfigMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *BackupS3ConfigMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (m *BackupS3ConfigMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *BackupS3ConfigMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *BackupS3ConfigMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// Where appends a list predicates to the BackupS3ConfigMutation builder. +func (m *BackupS3ConfigMutation) Where(ps ...predicate.BackupS3Config) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the BackupS3ConfigMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *BackupS3ConfigMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.BackupS3Config, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *BackupS3ConfigMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. 
+func (m *BackupS3ConfigMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (BackupS3Config). +func (m *BackupS3ConfigMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *BackupS3ConfigMutation) Fields() []string { + fields := make([]string, 0, 11) + if m.enabled != nil { + fields = append(fields, backups3config.FieldEnabled) + } + if m.endpoint != nil { + fields = append(fields, backups3config.FieldEndpoint) + } + if m.region != nil { + fields = append(fields, backups3config.FieldRegion) + } + if m.bucket != nil { + fields = append(fields, backups3config.FieldBucket) + } + if m.access_key_id != nil { + fields = append(fields, backups3config.FieldAccessKeyID) + } + if m.secret_access_key_encrypted != nil { + fields = append(fields, backups3config.FieldSecretAccessKeyEncrypted) + } + if m.prefix != nil { + fields = append(fields, backups3config.FieldPrefix) + } + if m.force_path_style != nil { + fields = append(fields, backups3config.FieldForcePathStyle) + } + if m.use_ssl != nil { + fields = append(fields, backups3config.FieldUseSsl) + } + if m.created_at != nil { + fields = append(fields, backups3config.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, backups3config.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *BackupS3ConfigMutation) Field(name string) (ent.Value, bool) { + switch name { + case backups3config.FieldEnabled: + return m.Enabled() + case backups3config.FieldEndpoint: + return m.Endpoint() + case backups3config.FieldRegion: + return m.Region() + case backups3config.FieldBucket: + return m.Bucket() + case backups3config.FieldAccessKeyID: + return m.AccessKeyID() + case backups3config.FieldSecretAccessKeyEncrypted: + return m.SecretAccessKeyEncrypted() + case backups3config.FieldPrefix: + return m.Prefix() + case backups3config.FieldForcePathStyle: + return m.ForcePathStyle() + case backups3config.FieldUseSsl: + return m.UseSsl() + case backups3config.FieldCreatedAt: + return m.CreatedAt() + case backups3config.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *BackupS3ConfigMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case backups3config.FieldEnabled: + return m.OldEnabled(ctx) + case backups3config.FieldEndpoint: + return m.OldEndpoint(ctx) + case backups3config.FieldRegion: + return m.OldRegion(ctx) + case backups3config.FieldBucket: + return m.OldBucket(ctx) + case backups3config.FieldAccessKeyID: + return m.OldAccessKeyID(ctx) + case backups3config.FieldSecretAccessKeyEncrypted: + return m.OldSecretAccessKeyEncrypted(ctx) + case backups3config.FieldPrefix: + return m.OldPrefix(ctx) + case backups3config.FieldForcePathStyle: + return m.OldForcePathStyle(ctx) + case backups3config.FieldUseSsl: + return m.OldUseSsl(ctx) + case backups3config.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case backups3config.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown BackupS3Config field %s", name) +} + +// SetField sets the value of a field with the given name. 
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *BackupS3ConfigMutation) SetField(name string, value ent.Value) error { + switch name { + case backups3config.FieldEnabled: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEnabled(v) + return nil + case backups3config.FieldEndpoint: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetEndpoint(v) + return nil + case backups3config.FieldRegion: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRegion(v) + return nil + case backups3config.FieldBucket: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBucket(v) + return nil + case backups3config.FieldAccessKeyID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAccessKeyID(v) + return nil + case backups3config.FieldSecretAccessKeyEncrypted: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSecretAccessKeyEncrypted(v) + return nil + case backups3config.FieldPrefix: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPrefix(v) + return nil + case backups3config.FieldForcePathStyle: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetForcePathStyle(v) + return nil + case backups3config.FieldUseSsl: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUseSsl(v) + return nil + case backups3config.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + 
m.SetCreatedAt(v) + return nil + case backups3config.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown BackupS3Config field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *BackupS3ConfigMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *BackupS3ConfigMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *BackupS3ConfigMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown BackupS3Config numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *BackupS3ConfigMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(backups3config.FieldSecretAccessKeyEncrypted) { + fields = append(fields, backups3config.FieldSecretAccessKeyEncrypted) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *BackupS3ConfigMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *BackupS3ConfigMutation) ClearField(name string) error { + switch name { + case backups3config.FieldSecretAccessKeyEncrypted: + m.ClearSecretAccessKeyEncrypted() + return nil + } + return fmt.Errorf("unknown BackupS3Config nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *BackupS3ConfigMutation) ResetField(name string) error { + switch name { + case backups3config.FieldEnabled: + m.ResetEnabled() + return nil + case backups3config.FieldEndpoint: + m.ResetEndpoint() + return nil + case backups3config.FieldRegion: + m.ResetRegion() + return nil + case backups3config.FieldBucket: + m.ResetBucket() + return nil + case backups3config.FieldAccessKeyID: + m.ResetAccessKeyID() + return nil + case backups3config.FieldSecretAccessKeyEncrypted: + m.ResetSecretAccessKeyEncrypted() + return nil + case backups3config.FieldPrefix: + m.ResetPrefix() + return nil + case backups3config.FieldForcePathStyle: + m.ResetForcePathStyle() + return nil + case backups3config.FieldUseSsl: + m.ResetUseSsl() + return nil + case backups3config.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case backups3config.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown BackupS3Config field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *BackupS3ConfigMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *BackupS3ConfigMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. 
+func (m *BackupS3ConfigMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *BackupS3ConfigMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *BackupS3ConfigMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *BackupS3ConfigMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *BackupS3ConfigMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown BackupS3Config unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *BackupS3ConfigMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown BackupS3Config edge %s", name) +} + +// BackupSettingMutation represents an operation that mutates the BackupSetting nodes in the graph. +type BackupSettingMutation struct { + config + op Op + typ string + id *int + source_mode *backupsetting.SourceMode + backup_root *string + retention_days *int + addretention_days *int + keep_last *int + addkeep_last *int + sqlite_path *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*BackupSetting, error) + predicates []predicate.BackupSetting +} + +var _ ent.Mutation = (*BackupSettingMutation)(nil) + +// backupsettingOption allows management of the mutation configuration using functional options. 
+type backupsettingOption func(*BackupSettingMutation) + +// newBackupSettingMutation creates new mutation for the BackupSetting entity. +func newBackupSettingMutation(c config, op Op, opts ...backupsettingOption) *BackupSettingMutation { + m := &BackupSettingMutation{ + config: c, + op: op, + typ: TypeBackupSetting, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withBackupSettingID sets the ID field of the mutation. +func withBackupSettingID(id int) backupsettingOption { + return func(m *BackupSettingMutation) { + var ( + err error + once sync.Once + value *BackupSetting + ) + m.oldValue = func(ctx context.Context) (*BackupSetting, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().BackupSetting.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withBackupSetting sets the old BackupSetting of the mutation. +func withBackupSetting(node *BackupSetting) backupsettingOption { + return func(m *BackupSettingMutation) { + m.oldValue = func(context.Context) (*BackupSetting, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m BackupSettingMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m BackupSettingMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. 
Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *BackupSettingMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *BackupSettingMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().BackupSetting.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetSourceMode sets the "source_mode" field. +func (m *BackupSettingMutation) SetSourceMode(bm backupsetting.SourceMode) { + m.source_mode = &bm +} + +// SourceMode returns the value of the "source_mode" field in the mutation. +func (m *BackupSettingMutation) SourceMode() (r backupsetting.SourceMode, exists bool) { + v := m.source_mode + if v == nil { + return + } + return *v, true +} + +// OldSourceMode returns the old "source_mode" field's value of the BackupSetting entity. +// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupSettingMutation) OldSourceMode(ctx context.Context) (v backupsetting.SourceMode, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceMode is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceMode requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceMode: %w", err) + } + return oldValue.SourceMode, nil +} + +// ResetSourceMode resets all changes to the "source_mode" field. +func (m *BackupSettingMutation) ResetSourceMode() { + m.source_mode = nil +} + +// SetBackupRoot sets the "backup_root" field. +func (m *BackupSettingMutation) SetBackupRoot(s string) { + m.backup_root = &s +} + +// BackupRoot returns the value of the "backup_root" field in the mutation. +func (m *BackupSettingMutation) BackupRoot() (r string, exists bool) { + v := m.backup_root + if v == nil { + return + } + return *v, true +} + +// OldBackupRoot returns the old "backup_root" field's value of the BackupSetting entity. +// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSettingMutation) OldBackupRoot(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBackupRoot is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBackupRoot requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBackupRoot: %w", err) + } + return oldValue.BackupRoot, nil +} + +// ResetBackupRoot resets all changes to the "backup_root" field. 
+func (m *BackupSettingMutation) ResetBackupRoot() { + m.backup_root = nil +} + +// SetRetentionDays sets the "retention_days" field. +func (m *BackupSettingMutation) SetRetentionDays(i int) { + m.retention_days = &i + m.addretention_days = nil +} + +// RetentionDays returns the value of the "retention_days" field in the mutation. +func (m *BackupSettingMutation) RetentionDays() (r int, exists bool) { + v := m.retention_days + if v == nil { + return + } + return *v, true +} + +// OldRetentionDays returns the old "retention_days" field's value of the BackupSetting entity. +// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSettingMutation) OldRetentionDays(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRetentionDays is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRetentionDays requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRetentionDays: %w", err) + } + return oldValue.RetentionDays, nil +} + +// AddRetentionDays adds i to the "retention_days" field. +func (m *BackupSettingMutation) AddRetentionDays(i int) { + if m.addretention_days != nil { + *m.addretention_days += i + } else { + m.addretention_days = &i + } +} + +// AddedRetentionDays returns the value that was added to the "retention_days" field in this mutation. +func (m *BackupSettingMutation) AddedRetentionDays() (r int, exists bool) { + v := m.addretention_days + if v == nil { + return + } + return *v, true +} + +// ResetRetentionDays resets all changes to the "retention_days" field. 
+func (m *BackupSettingMutation) ResetRetentionDays() { + m.retention_days = nil + m.addretention_days = nil +} + +// SetKeepLast sets the "keep_last" field. +func (m *BackupSettingMutation) SetKeepLast(i int) { + m.keep_last = &i + m.addkeep_last = nil +} + +// KeepLast returns the value of the "keep_last" field in the mutation. +func (m *BackupSettingMutation) KeepLast() (r int, exists bool) { + v := m.keep_last + if v == nil { + return + } + return *v, true +} + +// OldKeepLast returns the old "keep_last" field's value of the BackupSetting entity. +// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSettingMutation) OldKeepLast(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldKeepLast is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldKeepLast requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldKeepLast: %w", err) + } + return oldValue.KeepLast, nil +} + +// AddKeepLast adds i to the "keep_last" field. +func (m *BackupSettingMutation) AddKeepLast(i int) { + if m.addkeep_last != nil { + *m.addkeep_last += i + } else { + m.addkeep_last = &i + } +} + +// AddedKeepLast returns the value that was added to the "keep_last" field in this mutation. +func (m *BackupSettingMutation) AddedKeepLast() (r int, exists bool) { + v := m.addkeep_last + if v == nil { + return + } + return *v, true +} + +// ResetKeepLast resets all changes to the "keep_last" field. +func (m *BackupSettingMutation) ResetKeepLast() { + m.keep_last = nil + m.addkeep_last = nil +} + +// SetSqlitePath sets the "sqlite_path" field. 
+func (m *BackupSettingMutation) SetSqlitePath(s string) { + m.sqlite_path = &s +} + +// SqlitePath returns the value of the "sqlite_path" field in the mutation. +func (m *BackupSettingMutation) SqlitePath() (r string, exists bool) { + v := m.sqlite_path + if v == nil { + return + } + return *v, true +} + +// OldSqlitePath returns the old "sqlite_path" field's value of the BackupSetting entity. +// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSettingMutation) OldSqlitePath(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSqlitePath is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSqlitePath requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSqlitePath: %w", err) + } + return oldValue.SqlitePath, nil +} + +// ResetSqlitePath resets all changes to the "sqlite_path" field. +func (m *BackupSettingMutation) ResetSqlitePath() { + m.sqlite_path = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *BackupSettingMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *BackupSettingMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the BackupSetting entity. +// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupSettingMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *BackupSettingMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *BackupSettingMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *BackupSettingMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the BackupSetting entity. +// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSettingMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *BackupSettingMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// Where appends a list predicates to the BackupSettingMutation builder. 
+func (m *BackupSettingMutation) Where(ps ...predicate.BackupSetting) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the BackupSettingMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *BackupSettingMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.BackupSetting, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *BackupSettingMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *BackupSettingMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (BackupSetting). +func (m *BackupSettingMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *BackupSettingMutation) Fields() []string { + fields := make([]string, 0, 7) + if m.source_mode != nil { + fields = append(fields, backupsetting.FieldSourceMode) + } + if m.backup_root != nil { + fields = append(fields, backupsetting.FieldBackupRoot) + } + if m.retention_days != nil { + fields = append(fields, backupsetting.FieldRetentionDays) + } + if m.keep_last != nil { + fields = append(fields, backupsetting.FieldKeepLast) + } + if m.sqlite_path != nil { + fields = append(fields, backupsetting.FieldSqlitePath) + } + if m.created_at != nil { + fields = append(fields, backupsetting.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, backupsetting.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *BackupSettingMutation) Field(name string) (ent.Value, bool) { + switch name { + case backupsetting.FieldSourceMode: + return m.SourceMode() + case backupsetting.FieldBackupRoot: + return m.BackupRoot() + case backupsetting.FieldRetentionDays: + return m.RetentionDays() + case backupsetting.FieldKeepLast: + return m.KeepLast() + case backupsetting.FieldSqlitePath: + return m.SqlitePath() + case backupsetting.FieldCreatedAt: + return m.CreatedAt() + case backupsetting.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *BackupSettingMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case backupsetting.FieldSourceMode: + return m.OldSourceMode(ctx) + case backupsetting.FieldBackupRoot: + return m.OldBackupRoot(ctx) + case backupsetting.FieldRetentionDays: + return m.OldRetentionDays(ctx) + case backupsetting.FieldKeepLast: + return m.OldKeepLast(ctx) + case backupsetting.FieldSqlitePath: + return m.OldSqlitePath(ctx) + case backupsetting.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case backupsetting.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown BackupSetting field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *BackupSettingMutation) SetField(name string, value ent.Value) error { + switch name { + case backupsetting.FieldSourceMode: + v, ok := value.(backupsetting.SourceMode) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceMode(v) + return nil + case backupsetting.FieldBackupRoot: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBackupRoot(v) + return nil + case backupsetting.FieldRetentionDays: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRetentionDays(v) + return nil + case backupsetting.FieldKeepLast: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetKeepLast(v) + return nil + case backupsetting.FieldSqlitePath: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSqlitePath(v) + return nil + case backupsetting.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case backupsetting.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown BackupSetting field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *BackupSettingMutation) AddedFields() []string { + var fields []string + if m.addretention_days != nil { + fields = append(fields, backupsetting.FieldRetentionDays) + } + if m.addkeep_last != nil { + fields = append(fields, backupsetting.FieldKeepLast) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. 
The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *BackupSettingMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case backupsetting.FieldRetentionDays: + return m.AddedRetentionDays() + case backupsetting.FieldKeepLast: + return m.AddedKeepLast() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *BackupSettingMutation) AddField(name string, value ent.Value) error { + switch name { + case backupsetting.FieldRetentionDays: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddRetentionDays(v) + return nil + case backupsetting.FieldKeepLast: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddKeepLast(v) + return nil + } + return fmt.Errorf("unknown BackupSetting numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *BackupSettingMutation) ClearedFields() []string { + return nil +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *BackupSettingMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *BackupSettingMutation) ClearField(name string) error { + return fmt.Errorf("unknown BackupSetting nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *BackupSettingMutation) ResetField(name string) error { + switch name { + case backupsetting.FieldSourceMode: + m.ResetSourceMode() + return nil + case backupsetting.FieldBackupRoot: + m.ResetBackupRoot() + return nil + case backupsetting.FieldRetentionDays: + m.ResetRetentionDays() + return nil + case backupsetting.FieldKeepLast: + m.ResetKeepLast() + return nil + case backupsetting.FieldSqlitePath: + m.ResetSqlitePath() + return nil + case backupsetting.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case backupsetting.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown BackupSetting field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *BackupSettingMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *BackupSettingMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *BackupSettingMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *BackupSettingMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *BackupSettingMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *BackupSettingMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. 
+func (m *BackupSettingMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown BackupSetting unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *BackupSettingMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown BackupSetting edge %s", name) +} + +// BackupSourceConfigMutation represents an operation that mutates the BackupSourceConfig nodes in the graph. +type BackupSourceConfigMutation struct { + config + op Op + typ string + id *int + source_type *backupsourceconfig.SourceType + host *string + port *int + addport *int + username *string + password_encrypted *string + database *string + ssl_mode *string + addr *string + redis_db *int + addredis_db *int + container_name *string + created_at *time.Time + updated_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*BackupSourceConfig, error) + predicates []predicate.BackupSourceConfig +} + +var _ ent.Mutation = (*BackupSourceConfigMutation)(nil) + +// backupsourceconfigOption allows management of the mutation configuration using functional options. +type backupsourceconfigOption func(*BackupSourceConfigMutation) + +// newBackupSourceConfigMutation creates new mutation for the BackupSourceConfig entity. +func newBackupSourceConfigMutation(c config, op Op, opts ...backupsourceconfigOption) *BackupSourceConfigMutation { + m := &BackupSourceConfigMutation{ + config: c, + op: op, + typ: TypeBackupSourceConfig, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withBackupSourceConfigID sets the ID field of the mutation. 
+func withBackupSourceConfigID(id int) backupsourceconfigOption { + return func(m *BackupSourceConfigMutation) { + var ( + err error + once sync.Once + value *BackupSourceConfig + ) + m.oldValue = func(ctx context.Context) (*BackupSourceConfig, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().BackupSourceConfig.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withBackupSourceConfig sets the old BackupSourceConfig of the mutation. +func withBackupSourceConfig(node *BackupSourceConfig) backupsourceconfigOption { + return func(m *BackupSourceConfigMutation) { + m.oldValue = func(context.Context) (*BackupSourceConfig, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m BackupSourceConfigMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m BackupSourceConfigMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *BackupSourceConfigMutation) ID() (id int, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. 
+// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *BackupSourceConfigMutation) IDs(ctx context.Context) ([]int, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().BackupSourceConfig.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetSourceType sets the "source_type" field. +func (m *BackupSourceConfigMutation) SetSourceType(bt backupsourceconfig.SourceType) { + m.source_type = &bt +} + +// SourceType returns the value of the "source_type" field in the mutation. +func (m *BackupSourceConfigMutation) SourceType() (r backupsourceconfig.SourceType, exists bool) { + v := m.source_type + if v == nil { + return + } + return *v, true +} + +// OldSourceType returns the old "source_type" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldSourceType(ctx context.Context) (v backupsourceconfig.SourceType, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSourceType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSourceType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSourceType: %w", err) + } + return oldValue.SourceType, nil +} + +// ResetSourceType resets all changes to the "source_type" field. 
+func (m *BackupSourceConfigMutation) ResetSourceType() { + m.source_type = nil +} + +// SetHost sets the "host" field. +func (m *BackupSourceConfigMutation) SetHost(s string) { + m.host = &s +} + +// Host returns the value of the "host" field in the mutation. +func (m *BackupSourceConfigMutation) Host() (r string, exists bool) { + v := m.host + if v == nil { + return + } + return *v, true +} + +// OldHost returns the old "host" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldHost(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldHost is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldHost requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldHost: %w", err) + } + return oldValue.Host, nil +} + +// ClearHost clears the value of the "host" field. +func (m *BackupSourceConfigMutation) ClearHost() { + m.host = nil + m.clearedFields[backupsourceconfig.FieldHost] = struct{}{} +} + +// HostCleared returns if the "host" field was cleared in this mutation. +func (m *BackupSourceConfigMutation) HostCleared() bool { + _, ok := m.clearedFields[backupsourceconfig.FieldHost] + return ok +} + +// ResetHost resets all changes to the "host" field. +func (m *BackupSourceConfigMutation) ResetHost() { + m.host = nil + delete(m.clearedFields, backupsourceconfig.FieldHost) +} + +// SetPort sets the "port" field. +func (m *BackupSourceConfigMutation) SetPort(i int) { + m.port = &i + m.addport = nil +} + +// Port returns the value of the "port" field in the mutation. 
+func (m *BackupSourceConfigMutation) Port() (r int, exists bool) { + v := m.port + if v == nil { + return + } + return *v, true +} + +// OldPort returns the old "port" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldPort(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPort is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPort requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPort: %w", err) + } + return oldValue.Port, nil +} + +// AddPort adds i to the "port" field. +func (m *BackupSourceConfigMutation) AddPort(i int) { + if m.addport != nil { + *m.addport += i + } else { + m.addport = &i + } +} + +// AddedPort returns the value that was added to the "port" field in this mutation. +func (m *BackupSourceConfigMutation) AddedPort() (r int, exists bool) { + v := m.addport + if v == nil { + return + } + return *v, true +} + +// ClearPort clears the value of the "port" field. +func (m *BackupSourceConfigMutation) ClearPort() { + m.port = nil + m.addport = nil + m.clearedFields[backupsourceconfig.FieldPort] = struct{}{} +} + +// PortCleared returns if the "port" field was cleared in this mutation. +func (m *BackupSourceConfigMutation) PortCleared() bool { + _, ok := m.clearedFields[backupsourceconfig.FieldPort] + return ok +} + +// ResetPort resets all changes to the "port" field. +func (m *BackupSourceConfigMutation) ResetPort() { + m.port = nil + m.addport = nil + delete(m.clearedFields, backupsourceconfig.FieldPort) +} + +// SetUsername sets the "username" field. 
+func (m *BackupSourceConfigMutation) SetUsername(s string) { + m.username = &s +} + +// Username returns the value of the "username" field in the mutation. +func (m *BackupSourceConfigMutation) Username() (r string, exists bool) { + v := m.username + if v == nil { + return + } + return *v, true +} + +// OldUsername returns the old "username" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldUsername(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUsername is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUsername requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUsername: %w", err) + } + return oldValue.Username, nil +} + +// ClearUsername clears the value of the "username" field. +func (m *BackupSourceConfigMutation) ClearUsername() { + m.username = nil + m.clearedFields[backupsourceconfig.FieldUsername] = struct{}{} +} + +// UsernameCleared returns if the "username" field was cleared in this mutation. +func (m *BackupSourceConfigMutation) UsernameCleared() bool { + _, ok := m.clearedFields[backupsourceconfig.FieldUsername] + return ok +} + +// ResetUsername resets all changes to the "username" field. +func (m *BackupSourceConfigMutation) ResetUsername() { + m.username = nil + delete(m.clearedFields, backupsourceconfig.FieldUsername) +} + +// SetPasswordEncrypted sets the "password_encrypted" field. +func (m *BackupSourceConfigMutation) SetPasswordEncrypted(s string) { + m.password_encrypted = &s +} + +// PasswordEncrypted returns the value of the "password_encrypted" field in the mutation. 
+func (m *BackupSourceConfigMutation) PasswordEncrypted() (r string, exists bool) { + v := m.password_encrypted + if v == nil { + return + } + return *v, true +} + +// OldPasswordEncrypted returns the old "password_encrypted" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldPasswordEncrypted(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPasswordEncrypted is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPasswordEncrypted requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPasswordEncrypted: %w", err) + } + return oldValue.PasswordEncrypted, nil +} + +// ClearPasswordEncrypted clears the value of the "password_encrypted" field. +func (m *BackupSourceConfigMutation) ClearPasswordEncrypted() { + m.password_encrypted = nil + m.clearedFields[backupsourceconfig.FieldPasswordEncrypted] = struct{}{} +} + +// PasswordEncryptedCleared returns if the "password_encrypted" field was cleared in this mutation. +func (m *BackupSourceConfigMutation) PasswordEncryptedCleared() bool { + _, ok := m.clearedFields[backupsourceconfig.FieldPasswordEncrypted] + return ok +} + +// ResetPasswordEncrypted resets all changes to the "password_encrypted" field. +func (m *BackupSourceConfigMutation) ResetPasswordEncrypted() { + m.password_encrypted = nil + delete(m.clearedFields, backupsourceconfig.FieldPasswordEncrypted) +} + +// SetDatabase sets the "database" field. +func (m *BackupSourceConfigMutation) SetDatabase(s string) { + m.database = &s +} + +// Database returns the value of the "database" field in the mutation. 
+func (m *BackupSourceConfigMutation) Database() (r string, exists bool) { + v := m.database + if v == nil { + return + } + return *v, true +} + +// OldDatabase returns the old "database" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldDatabase(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDatabase is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDatabase requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDatabase: %w", err) + } + return oldValue.Database, nil +} + +// ClearDatabase clears the value of the "database" field. +func (m *BackupSourceConfigMutation) ClearDatabase() { + m.database = nil + m.clearedFields[backupsourceconfig.FieldDatabase] = struct{}{} +} + +// DatabaseCleared returns if the "database" field was cleared in this mutation. +func (m *BackupSourceConfigMutation) DatabaseCleared() bool { + _, ok := m.clearedFields[backupsourceconfig.FieldDatabase] + return ok +} + +// ResetDatabase resets all changes to the "database" field. +func (m *BackupSourceConfigMutation) ResetDatabase() { + m.database = nil + delete(m.clearedFields, backupsourceconfig.FieldDatabase) +} + +// SetSslMode sets the "ssl_mode" field. +func (m *BackupSourceConfigMutation) SetSslMode(s string) { + m.ssl_mode = &s +} + +// SslMode returns the value of the "ssl_mode" field in the mutation. +func (m *BackupSourceConfigMutation) SslMode() (r string, exists bool) { + v := m.ssl_mode + if v == nil { + return + } + return *v, true +} + +// OldSslMode returns the old "ssl_mode" field's value of the BackupSourceConfig entity. 
+// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldSslMode(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSslMode is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSslMode requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSslMode: %w", err) + } + return oldValue.SslMode, nil +} + +// ClearSslMode clears the value of the "ssl_mode" field. +func (m *BackupSourceConfigMutation) ClearSslMode() { + m.ssl_mode = nil + m.clearedFields[backupsourceconfig.FieldSslMode] = struct{}{} +} + +// SslModeCleared returns if the "ssl_mode" field was cleared in this mutation. +func (m *BackupSourceConfigMutation) SslModeCleared() bool { + _, ok := m.clearedFields[backupsourceconfig.FieldSslMode] + return ok +} + +// ResetSslMode resets all changes to the "ssl_mode" field. +func (m *BackupSourceConfigMutation) ResetSslMode() { + m.ssl_mode = nil + delete(m.clearedFields, backupsourceconfig.FieldSslMode) +} + +// SetAddr sets the "addr" field. +func (m *BackupSourceConfigMutation) SetAddr(s string) { + m.addr = &s +} + +// Addr returns the value of the "addr" field in the mutation. +func (m *BackupSourceConfigMutation) Addr() (r string, exists bool) { + v := m.addr + if v == nil { + return + } + return *v, true +} + +// OldAddr returns the old "addr" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupSourceConfigMutation) OldAddr(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldAddr is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldAddr requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldAddr: %w", err) + } + return oldValue.Addr, nil +} + +// ClearAddr clears the value of the "addr" field. +func (m *BackupSourceConfigMutation) ClearAddr() { + m.addr = nil + m.clearedFields[backupsourceconfig.FieldAddr] = struct{}{} +} + +// AddrCleared returns if the "addr" field was cleared in this mutation. +func (m *BackupSourceConfigMutation) AddrCleared() bool { + _, ok := m.clearedFields[backupsourceconfig.FieldAddr] + return ok +} + +// ResetAddr resets all changes to the "addr" field. +func (m *BackupSourceConfigMutation) ResetAddr() { + m.addr = nil + delete(m.clearedFields, backupsourceconfig.FieldAddr) +} + +// SetRedisDb sets the "redis_db" field. +func (m *BackupSourceConfigMutation) SetRedisDb(i int) { + m.redis_db = &i + m.addredis_db = nil +} + +// RedisDb returns the value of the "redis_db" field in the mutation. +func (m *BackupSourceConfigMutation) RedisDb() (r int, exists bool) { + v := m.redis_db + if v == nil { + return + } + return *v, true +} + +// OldRedisDb returns the old "redis_db" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupSourceConfigMutation) OldRedisDb(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRedisDb is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRedisDb requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRedisDb: %w", err) + } + return oldValue.RedisDb, nil +} + +// AddRedisDb adds i to the "redis_db" field. +func (m *BackupSourceConfigMutation) AddRedisDb(i int) { + if m.addredis_db != nil { + *m.addredis_db += i + } else { + m.addredis_db = &i + } +} + +// AddedRedisDb returns the value that was added to the "redis_db" field in this mutation. +func (m *BackupSourceConfigMutation) AddedRedisDb() (r int, exists bool) { + v := m.addredis_db + if v == nil { + return + } + return *v, true +} + +// ClearRedisDb clears the value of the "redis_db" field. +func (m *BackupSourceConfigMutation) ClearRedisDb() { + m.redis_db = nil + m.addredis_db = nil + m.clearedFields[backupsourceconfig.FieldRedisDb] = struct{}{} +} + +// RedisDbCleared returns if the "redis_db" field was cleared in this mutation. +func (m *BackupSourceConfigMutation) RedisDbCleared() bool { + _, ok := m.clearedFields[backupsourceconfig.FieldRedisDb] + return ok +} + +// ResetRedisDb resets all changes to the "redis_db" field. +func (m *BackupSourceConfigMutation) ResetRedisDb() { + m.redis_db = nil + m.addredis_db = nil + delete(m.clearedFields, backupsourceconfig.FieldRedisDb) +} + +// SetContainerName sets the "container_name" field. +func (m *BackupSourceConfigMutation) SetContainerName(s string) { + m.container_name = &s +} + +// ContainerName returns the value of the "container_name" field in the mutation. 
+func (m *BackupSourceConfigMutation) ContainerName() (r string, exists bool) { + v := m.container_name + if v == nil { + return + } + return *v, true +} + +// OldContainerName returns the old "container_name" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldContainerName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldContainerName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldContainerName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldContainerName: %w", err) + } + return oldValue.ContainerName, nil +} + +// ResetContainerName resets all changes to the "container_name" field. +func (m *BackupSourceConfigMutation) ResetContainerName() { + m.container_name = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *BackupSourceConfigMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *BackupSourceConfigMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupSourceConfigMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *BackupSourceConfigMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *BackupSourceConfigMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *BackupSourceConfigMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. 
+func (m *BackupSourceConfigMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// Where appends a list predicates to the BackupSourceConfigMutation builder. +func (m *BackupSourceConfigMutation) Where(ps ...predicate.BackupSourceConfig) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the BackupSourceConfigMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *BackupSourceConfigMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.BackupSourceConfig, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *BackupSourceConfigMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *BackupSourceConfigMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (BackupSourceConfig). +func (m *BackupSourceConfigMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *BackupSourceConfigMutation) Fields() []string { + fields := make([]string, 0, 12) + if m.source_type != nil { + fields = append(fields, backupsourceconfig.FieldSourceType) + } + if m.host != nil { + fields = append(fields, backupsourceconfig.FieldHost) + } + if m.port != nil { + fields = append(fields, backupsourceconfig.FieldPort) + } + if m.username != nil { + fields = append(fields, backupsourceconfig.FieldUsername) + } + if m.password_encrypted != nil { + fields = append(fields, backupsourceconfig.FieldPasswordEncrypted) + } + if m.database != nil { + fields = append(fields, backupsourceconfig.FieldDatabase) + } + if m.ssl_mode != nil { + fields = append(fields, backupsourceconfig.FieldSslMode) + } + if m.addr != nil { + fields = append(fields, backupsourceconfig.FieldAddr) + } + if m.redis_db != nil { + fields = append(fields, backupsourceconfig.FieldRedisDb) + } + if m.container_name != nil { + fields = append(fields, backupsourceconfig.FieldContainerName) + } + if m.created_at != nil { + fields = append(fields, backupsourceconfig.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, backupsourceconfig.FieldUpdatedAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *BackupSourceConfigMutation) Field(name string) (ent.Value, bool) { + switch name { + case backupsourceconfig.FieldSourceType: + return m.SourceType() + case backupsourceconfig.FieldHost: + return m.Host() + case backupsourceconfig.FieldPort: + return m.Port() + case backupsourceconfig.FieldUsername: + return m.Username() + case backupsourceconfig.FieldPasswordEncrypted: + return m.PasswordEncrypted() + case backupsourceconfig.FieldDatabase: + return m.Database() + case backupsourceconfig.FieldSslMode: + return m.SslMode() + case backupsourceconfig.FieldAddr: + return m.Addr() + case backupsourceconfig.FieldRedisDb: + return m.RedisDb() + case backupsourceconfig.FieldContainerName: + return m.ContainerName() + case backupsourceconfig.FieldCreatedAt: + return m.CreatedAt() + case backupsourceconfig.FieldUpdatedAt: + return m.UpdatedAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *BackupSourceConfigMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case backupsourceconfig.FieldSourceType: + return m.OldSourceType(ctx) + case backupsourceconfig.FieldHost: + return m.OldHost(ctx) + case backupsourceconfig.FieldPort: + return m.OldPort(ctx) + case backupsourceconfig.FieldUsername: + return m.OldUsername(ctx) + case backupsourceconfig.FieldPasswordEncrypted: + return m.OldPasswordEncrypted(ctx) + case backupsourceconfig.FieldDatabase: + return m.OldDatabase(ctx) + case backupsourceconfig.FieldSslMode: + return m.OldSslMode(ctx) + case backupsourceconfig.FieldAddr: + return m.OldAddr(ctx) + case backupsourceconfig.FieldRedisDb: + return m.OldRedisDb(ctx) + case backupsourceconfig.FieldContainerName: + return m.OldContainerName(ctx) + case backupsourceconfig.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case backupsourceconfig.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + } + return nil, fmt.Errorf("unknown BackupSourceConfig field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *BackupSourceConfigMutation) SetField(name string, value ent.Value) error { + switch name { + case backupsourceconfig.FieldSourceType: + v, ok := value.(backupsourceconfig.SourceType) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSourceType(v) + return nil + case backupsourceconfig.FieldHost: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetHost(v) + return nil + case backupsourceconfig.FieldPort: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPort(v) + return nil + case backupsourceconfig.FieldUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsername(v) + return nil + case backupsourceconfig.FieldPasswordEncrypted: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPasswordEncrypted(v) + return nil + case backupsourceconfig.FieldDatabase: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDatabase(v) + return nil + case backupsourceconfig.FieldSslMode: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSslMode(v) + return nil + case backupsourceconfig.FieldAddr: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetAddr(v) + return nil + case backupsourceconfig.FieldRedisDb: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRedisDb(v) + return nil + case backupsourceconfig.FieldContainerName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetContainerName(v) + return nil + case backupsourceconfig.FieldCreatedAt: + v, ok := 
value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case backupsourceconfig.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + } + return fmt.Errorf("unknown BackupSourceConfig field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *BackupSourceConfigMutation) AddedFields() []string { + var fields []string + if m.addport != nil { + fields = append(fields, backupsourceconfig.FieldPort) + } + if m.addredis_db != nil { + fields = append(fields, backupsourceconfig.FieldRedisDb) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *BackupSourceConfigMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case backupsourceconfig.FieldPort: + return m.AddedPort() + case backupsourceconfig.FieldRedisDb: + return m.AddedRedisDb() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *BackupSourceConfigMutation) AddField(name string, value ent.Value) error { + switch name { + case backupsourceconfig.FieldPort: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddPort(v) + return nil + case backupsourceconfig.FieldRedisDb: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddRedisDb(v) + return nil + } + return fmt.Errorf("unknown BackupSourceConfig numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *BackupSourceConfigMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(backupsourceconfig.FieldHost) { + fields = append(fields, backupsourceconfig.FieldHost) + } + if m.FieldCleared(backupsourceconfig.FieldPort) { + fields = append(fields, backupsourceconfig.FieldPort) + } + if m.FieldCleared(backupsourceconfig.FieldUsername) { + fields = append(fields, backupsourceconfig.FieldUsername) + } + if m.FieldCleared(backupsourceconfig.FieldPasswordEncrypted) { + fields = append(fields, backupsourceconfig.FieldPasswordEncrypted) + } + if m.FieldCleared(backupsourceconfig.FieldDatabase) { + fields = append(fields, backupsourceconfig.FieldDatabase) + } + if m.FieldCleared(backupsourceconfig.FieldSslMode) { + fields = append(fields, backupsourceconfig.FieldSslMode) + } + if m.FieldCleared(backupsourceconfig.FieldAddr) { + fields = append(fields, backupsourceconfig.FieldAddr) + } + if m.FieldCleared(backupsourceconfig.FieldRedisDb) { + fields = append(fields, backupsourceconfig.FieldRedisDb) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *BackupSourceConfigMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. 
It returns an +// error if the field is not defined in the schema. +func (m *BackupSourceConfigMutation) ClearField(name string) error { + switch name { + case backupsourceconfig.FieldHost: + m.ClearHost() + return nil + case backupsourceconfig.FieldPort: + m.ClearPort() + return nil + case backupsourceconfig.FieldUsername: + m.ClearUsername() + return nil + case backupsourceconfig.FieldPasswordEncrypted: + m.ClearPasswordEncrypted() + return nil + case backupsourceconfig.FieldDatabase: + m.ClearDatabase() + return nil + case backupsourceconfig.FieldSslMode: + m.ClearSslMode() + return nil + case backupsourceconfig.FieldAddr: + m.ClearAddr() + return nil + case backupsourceconfig.FieldRedisDb: + m.ClearRedisDb() + return nil + } + return fmt.Errorf("unknown BackupSourceConfig nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *BackupSourceConfigMutation) ResetField(name string) error { + switch name { + case backupsourceconfig.FieldSourceType: + m.ResetSourceType() + return nil + case backupsourceconfig.FieldHost: + m.ResetHost() + return nil + case backupsourceconfig.FieldPort: + m.ResetPort() + return nil + case backupsourceconfig.FieldUsername: + m.ResetUsername() + return nil + case backupsourceconfig.FieldPasswordEncrypted: + m.ResetPasswordEncrypted() + return nil + case backupsourceconfig.FieldDatabase: + m.ResetDatabase() + return nil + case backupsourceconfig.FieldSslMode: + m.ResetSslMode() + return nil + case backupsourceconfig.FieldAddr: + m.ResetAddr() + return nil + case backupsourceconfig.FieldRedisDb: + m.ResetRedisDb() + return nil + case backupsourceconfig.FieldContainerName: + m.ResetContainerName() + return nil + case backupsourceconfig.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case backupsourceconfig.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + } + return fmt.Errorf("unknown 
BackupSourceConfig field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *BackupSourceConfigMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *BackupSourceConfigMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *BackupSourceConfigMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *BackupSourceConfigMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *BackupSourceConfigMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *BackupSourceConfigMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *BackupSourceConfigMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown BackupSourceConfig unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. 
+func (m *BackupSourceConfigMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown BackupSourceConfig edge %s", name) +} diff --git a/backup/ent/predicate/predicate.go b/backup/ent/predicate/predicate.go new file mode 100644 index 000000000..e1f1ad927 --- /dev/null +++ b/backup/ent/predicate/predicate.go @@ -0,0 +1,22 @@ +// Code generated by ent, DO NOT EDIT. + +package predicate + +import ( + "entgo.io/ent/dialect/sql" +) + +// BackupJob is the predicate function for backupjob builders. +type BackupJob func(*sql.Selector) + +// BackupJobEvent is the predicate function for backupjobevent builders. +type BackupJobEvent func(*sql.Selector) + +// BackupS3Config is the predicate function for backups3config builders. +type BackupS3Config func(*sql.Selector) + +// BackupSetting is the predicate function for backupsetting builders. +type BackupSetting func(*sql.Selector) + +// BackupSourceConfig is the predicate function for backupsourceconfig builders. +type BackupSourceConfig func(*sql.Selector) diff --git a/backup/ent/runtime.go b/backup/ent/runtime.go new file mode 100644 index 000000000..d46c11e92 --- /dev/null +++ b/backup/ent/runtime.go @@ -0,0 +1,142 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "time" + + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" + "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" + "github.com/Wei-Shaw/sub2api/backup/ent/schema" +) + +// The init function reads all schema descriptors with runtime code +// (default values, validators, hooks and policies) and stitches it +// to their package variables. +func init() { + backupjobFields := schema.BackupJob{}.Fields() + _ = backupjobFields + // backupjobDescTriggeredBy is the schema descriptor for triggered_by field. 
+ backupjobDescTriggeredBy := backupjobFields[3].Descriptor() + // backupjob.DefaultTriggeredBy holds the default value on creation for the triggered_by field. + backupjob.DefaultTriggeredBy = backupjobDescTriggeredBy.Default.(string) + // backupjobDescUploadToS3 is the schema descriptor for upload_to_s3 field. + backupjobDescUploadToS3 := backupjobFields[5].Descriptor() + // backupjob.DefaultUploadToS3 holds the default value on creation for the upload_to_s3 field. + backupjob.DefaultUploadToS3 = backupjobDescUploadToS3.Default.(bool) + // backupjobDescCreatedAt is the schema descriptor for created_at field. + backupjobDescCreatedAt := backupjobFields[15].Descriptor() + // backupjob.DefaultCreatedAt holds the default value on creation for the created_at field. + backupjob.DefaultCreatedAt = backupjobDescCreatedAt.Default.(func() time.Time) + // backupjobDescUpdatedAt is the schema descriptor for updated_at field. + backupjobDescUpdatedAt := backupjobFields[16].Descriptor() + // backupjob.DefaultUpdatedAt holds the default value on creation for the updated_at field. + backupjob.DefaultUpdatedAt = backupjobDescUpdatedAt.Default.(func() time.Time) + // backupjob.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + backupjob.UpdateDefaultUpdatedAt = backupjobDescUpdatedAt.UpdateDefault.(func() time.Time) + backupjobeventFields := schema.BackupJobEvent{}.Fields() + _ = backupjobeventFields + // backupjobeventDescEventType is the schema descriptor for event_type field. + backupjobeventDescEventType := backupjobeventFields[2].Descriptor() + // backupjobevent.DefaultEventType holds the default value on creation for the event_type field. + backupjobevent.DefaultEventType = backupjobeventDescEventType.Default.(string) + // backupjobeventDescEventTime is the schema descriptor for event_time field. 
+ backupjobeventDescEventTime := backupjobeventFields[5].Descriptor() + // backupjobevent.DefaultEventTime holds the default value on creation for the event_time field. + backupjobevent.DefaultEventTime = backupjobeventDescEventTime.Default.(func() time.Time) + // backupjobeventDescCreatedAt is the schema descriptor for created_at field. + backupjobeventDescCreatedAt := backupjobeventFields[6].Descriptor() + // backupjobevent.DefaultCreatedAt holds the default value on creation for the created_at field. + backupjobevent.DefaultCreatedAt = backupjobeventDescCreatedAt.Default.(func() time.Time) + backups3configFields := schema.BackupS3Config{}.Fields() + _ = backups3configFields + // backups3configDescEnabled is the schema descriptor for enabled field. + backups3configDescEnabled := backups3configFields[0].Descriptor() + // backups3config.DefaultEnabled holds the default value on creation for the enabled field. + backups3config.DefaultEnabled = backups3configDescEnabled.Default.(bool) + // backups3configDescEndpoint is the schema descriptor for endpoint field. + backups3configDescEndpoint := backups3configFields[1].Descriptor() + // backups3config.DefaultEndpoint holds the default value on creation for the endpoint field. + backups3config.DefaultEndpoint = backups3configDescEndpoint.Default.(string) + // backups3configDescRegion is the schema descriptor for region field. + backups3configDescRegion := backups3configFields[2].Descriptor() + // backups3config.DefaultRegion holds the default value on creation for the region field. + backups3config.DefaultRegion = backups3configDescRegion.Default.(string) + // backups3configDescBucket is the schema descriptor for bucket field. + backups3configDescBucket := backups3configFields[3].Descriptor() + // backups3config.DefaultBucket holds the default value on creation for the bucket field. 
+ backups3config.DefaultBucket = backups3configDescBucket.Default.(string) + // backups3configDescAccessKeyID is the schema descriptor for access_key_id field. + backups3configDescAccessKeyID := backups3configFields[4].Descriptor() + // backups3config.DefaultAccessKeyID holds the default value on creation for the access_key_id field. + backups3config.DefaultAccessKeyID = backups3configDescAccessKeyID.Default.(string) + // backups3configDescPrefix is the schema descriptor for prefix field. + backups3configDescPrefix := backups3configFields[6].Descriptor() + // backups3config.DefaultPrefix holds the default value on creation for the prefix field. + backups3config.DefaultPrefix = backups3configDescPrefix.Default.(string) + // backups3configDescForcePathStyle is the schema descriptor for force_path_style field. + backups3configDescForcePathStyle := backups3configFields[7].Descriptor() + // backups3config.DefaultForcePathStyle holds the default value on creation for the force_path_style field. + backups3config.DefaultForcePathStyle = backups3configDescForcePathStyle.Default.(bool) + // backups3configDescUseSsl is the schema descriptor for use_ssl field. + backups3configDescUseSsl := backups3configFields[8].Descriptor() + // backups3config.DefaultUseSsl holds the default value on creation for the use_ssl field. + backups3config.DefaultUseSsl = backups3configDescUseSsl.Default.(bool) + // backups3configDescCreatedAt is the schema descriptor for created_at field. + backups3configDescCreatedAt := backups3configFields[9].Descriptor() + // backups3config.DefaultCreatedAt holds the default value on creation for the created_at field. + backups3config.DefaultCreatedAt = backups3configDescCreatedAt.Default.(func() time.Time) + // backups3configDescUpdatedAt is the schema descriptor for updated_at field. + backups3configDescUpdatedAt := backups3configFields[10].Descriptor() + // backups3config.DefaultUpdatedAt holds the default value on creation for the updated_at field. 
+ backups3config.DefaultUpdatedAt = backups3configDescUpdatedAt.Default.(func() time.Time) + // backups3config.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + backups3config.UpdateDefaultUpdatedAt = backups3configDescUpdatedAt.UpdateDefault.(func() time.Time) + backupsettingFields := schema.BackupSetting{}.Fields() + _ = backupsettingFields + // backupsettingDescBackupRoot is the schema descriptor for backup_root field. + backupsettingDescBackupRoot := backupsettingFields[1].Descriptor() + // backupsetting.DefaultBackupRoot holds the default value on creation for the backup_root field. + backupsetting.DefaultBackupRoot = backupsettingDescBackupRoot.Default.(string) + // backupsettingDescRetentionDays is the schema descriptor for retention_days field. + backupsettingDescRetentionDays := backupsettingFields[2].Descriptor() + // backupsetting.DefaultRetentionDays holds the default value on creation for the retention_days field. + backupsetting.DefaultRetentionDays = backupsettingDescRetentionDays.Default.(int) + // backupsettingDescKeepLast is the schema descriptor for keep_last field. + backupsettingDescKeepLast := backupsettingFields[3].Descriptor() + // backupsetting.DefaultKeepLast holds the default value on creation for the keep_last field. + backupsetting.DefaultKeepLast = backupsettingDescKeepLast.Default.(int) + // backupsettingDescSqlitePath is the schema descriptor for sqlite_path field. + backupsettingDescSqlitePath := backupsettingFields[4].Descriptor() + // backupsetting.DefaultSqlitePath holds the default value on creation for the sqlite_path field. + backupsetting.DefaultSqlitePath = backupsettingDescSqlitePath.Default.(string) + // backupsettingDescCreatedAt is the schema descriptor for created_at field. + backupsettingDescCreatedAt := backupsettingFields[5].Descriptor() + // backupsetting.DefaultCreatedAt holds the default value on creation for the created_at field. 
+ backupsetting.DefaultCreatedAt = backupsettingDescCreatedAt.Default.(func() time.Time) + // backupsettingDescUpdatedAt is the schema descriptor for updated_at field. + backupsettingDescUpdatedAt := backupsettingFields[6].Descriptor() + // backupsetting.DefaultUpdatedAt holds the default value on creation for the updated_at field. + backupsetting.DefaultUpdatedAt = backupsettingDescUpdatedAt.Default.(func() time.Time) + // backupsetting.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + backupsetting.UpdateDefaultUpdatedAt = backupsettingDescUpdatedAt.UpdateDefault.(func() time.Time) + backupsourceconfigFields := schema.BackupSourceConfig{}.Fields() + _ = backupsourceconfigFields + // backupsourceconfigDescContainerName is the schema descriptor for container_name field. + backupsourceconfigDescContainerName := backupsourceconfigFields[9].Descriptor() + // backupsourceconfig.DefaultContainerName holds the default value on creation for the container_name field. + backupsourceconfig.DefaultContainerName = backupsourceconfigDescContainerName.Default.(string) + // backupsourceconfigDescCreatedAt is the schema descriptor for created_at field. + backupsourceconfigDescCreatedAt := backupsourceconfigFields[10].Descriptor() + // backupsourceconfig.DefaultCreatedAt holds the default value on creation for the created_at field. + backupsourceconfig.DefaultCreatedAt = backupsourceconfigDescCreatedAt.Default.(func() time.Time) + // backupsourceconfigDescUpdatedAt is the schema descriptor for updated_at field. + backupsourceconfigDescUpdatedAt := backupsourceconfigFields[11].Descriptor() + // backupsourceconfig.DefaultUpdatedAt holds the default value on creation for the updated_at field. + backupsourceconfig.DefaultUpdatedAt = backupsourceconfigDescUpdatedAt.Default.(func() time.Time) + // backupsourceconfig.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. 
+ backupsourceconfig.UpdateDefaultUpdatedAt = backupsourceconfigDescUpdatedAt.UpdateDefault.(func() time.Time) +} diff --git a/backup/ent/runtime/runtime.go b/backup/ent/runtime/runtime.go new file mode 100644 index 000000000..98973c65a --- /dev/null +++ b/backup/ent/runtime/runtime.go @@ -0,0 +1,10 @@ +// Code generated by ent, DO NOT EDIT. + +package runtime + +// The schema-stitching logic is generated in github.com/Wei-Shaw/sub2api/backup/ent/runtime.go + +const ( + Version = "v0.14.5" // Version of ent codegen. + Sum = "h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4=" // Sum of ent codegen. +) diff --git a/backup/ent/schema/backup_job.go b/backup/ent/schema/backup_job.go new file mode 100644 index 000000000..b0cbbe8e6 --- /dev/null +++ b/backup/ent/schema/backup_job.go @@ -0,0 +1,50 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +type BackupJob struct { + ent.Schema +} + +func (BackupJob) Fields() []ent.Field { + return []ent.Field{ + field.String("job_id").Unique(), + field.Enum("backup_type").Values("postgres", "redis", "full"), + field.Enum("status").Values("queued", "running", "succeeded", "failed", "partial_succeeded").Default("queued"), + field.String("triggered_by").Default("system"), + field.String("idempotency_key").Optional(), + field.Bool("upload_to_s3").Default(false), + field.Time("started_at").Optional().Nillable(), + field.Time("finished_at").Optional().Nillable(), + field.String("error_message").Optional(), + field.String("artifact_local_path").Optional(), + field.Int64("artifact_size_bytes").Optional().Nillable(), + field.String("artifact_sha256").Optional(), + field.String("s3_bucket").Optional(), + field.String("s3_key").Optional(), + field.String("s3_etag").Optional(), + field.Time("created_at").Default(time.Now).Immutable(), + field.Time("updated_at").Default(time.Now).UpdateDefault(time.Now), + } +} + +func (BackupJob) Edges() 
[]ent.Edge { + return []ent.Edge{ + edge.From("events", BackupJobEvent.Type).Ref("job"), + } +} + +func (BackupJob) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("status", "created_at"), + index.Fields("backup_type", "created_at"), + index.Fields("idempotency_key"), + } +} diff --git a/backup/ent/schema/backup_job_event.go b/backup/ent/schema/backup_job_event.go new file mode 100644 index 000000000..b4804ad7a --- /dev/null +++ b/backup/ent/schema/backup_job_event.go @@ -0,0 +1,38 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +type BackupJobEvent struct { + ent.Schema +} + +func (BackupJobEvent) Fields() []ent.Field { + return []ent.Field{ + field.Int("backup_job_id"), + field.Enum("level").Values("info", "warning", "error").Default("info"), + field.String("event_type").Default("state_change"), + field.String("message"), + field.String("payload").Optional(), + field.Time("event_time").Default(time.Now), + field.Time("created_at").Default(time.Now).Immutable(), + } +} + +func (BackupJobEvent) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("job", BackupJob.Type).Field("backup_job_id").Unique().Required(), + } +} + +func (BackupJobEvent) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("backup_job_id", "event_time"), + } +} diff --git a/backup/ent/schema/backup_s3_config.go b/backup/ent/schema/backup_s3_config.go new file mode 100644 index 000000000..3293f0622 --- /dev/null +++ b/backup/ent/schema/backup_s3_config.go @@ -0,0 +1,28 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" +) + +type BackupS3Config struct { + ent.Schema +} + +func (BackupS3Config) Fields() []ent.Field { + return []ent.Field{ + field.Bool("enabled").Default(false), + field.String("endpoint").Default(""), + field.String("region").Default(""), + field.String("bucket").Default(""), + 
field.String("access_key_id").Default(""), + field.String("secret_access_key_encrypted").Optional().Sensitive(), + field.String("prefix").Default(""), + field.Bool("force_path_style").Default(false), + field.Bool("use_ssl").Default(true), + field.Time("created_at").Default(time.Now).Immutable(), + field.Time("updated_at").Default(time.Now).UpdateDefault(time.Now), + } +} diff --git a/backup/ent/schema/backup_setting.go b/backup/ent/schema/backup_setting.go new file mode 100644 index 000000000..5ddb69283 --- /dev/null +++ b/backup/ent/schema/backup_setting.go @@ -0,0 +1,24 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" +) + +type BackupSetting struct { + ent.Schema +} + +func (BackupSetting) Fields() []ent.Field { + return []ent.Field{ + field.Enum("source_mode").Values("direct", "docker_exec").Default("direct"), + field.String("backup_root").Default("/var/lib/sub2api/backups"), + field.Int("retention_days").Default(7), + field.Int("keep_last").Default(30), + field.String("sqlite_path").Default("/var/lib/sub2api/backupd.db"), + field.Time("created_at").Default(time.Now).Immutable(), + field.Time("updated_at").Default(time.Now).UpdateDefault(time.Now), + } +} diff --git a/backup/ent/schema/backup_source_config.go b/backup/ent/schema/backup_source_config.go new file mode 100644 index 000000000..b17de7a76 --- /dev/null +++ b/backup/ent/schema/backup_source_config.go @@ -0,0 +1,36 @@ +package schema + +import ( + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +type BackupSourceConfig struct { + ent.Schema +} + +func (BackupSourceConfig) Fields() []ent.Field { + return []ent.Field{ + field.Enum("source_type").Values("postgres", "redis"), + field.String("host").Optional(), + field.Int("port").Optional().Nillable(), + field.String("username").Optional(), + field.String("password_encrypted").Optional().Sensitive(), + field.String("database").Optional(), + 
field.String("ssl_mode").Optional(), + field.String("addr").Optional(), + field.Int("redis_db").Optional().Nillable(), + field.String("container_name").Default(""), + field.Time("created_at").Default(time.Now).Immutable(), + field.Time("updated_at").Default(time.Now).UpdateDefault(time.Now), + } +} + +func (BackupSourceConfig) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("source_type").Unique(), + } +} diff --git a/backup/ent/tx.go b/backup/ent/tx.go new file mode 100644 index 000000000..ace341489 --- /dev/null +++ b/backup/ent/tx.go @@ -0,0 +1,222 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // BackupJob is the client for interacting with the BackupJob builders. + BackupJob *BackupJobClient + // BackupJobEvent is the client for interacting with the BackupJobEvent builders. + BackupJobEvent *BackupJobEventClient + // BackupS3Config is the client for interacting with the BackupS3Config builders. + BackupS3Config *BackupS3ConfigClient + // BackupSetting is the client for interacting with the BackupSetting builders. + BackupSetting *BackupSettingClient + // BackupSourceConfig is the client for interacting with the BackupSourceConfig builders. + BackupSourceConfig *BackupSourceConfigClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. 
+ CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. +func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. +func (tx *Tx) OnCommit(f CommitHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() +} + +type ( + // Rollbacker is the interface that wraps the Rollback method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. + RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. 
+ // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). +func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.BackupJob = NewBackupJobClient(tx.config) + tx.BackupJobEvent = NewBackupJobEventClient(tx.config) + tx.BackupS3Config = NewBackupS3ConfigClient(tx.config) + tx.BackupSetting = NewBackupSettingClient(tx.config) + tx.BackupSourceConfig = NewBackupSourceConfigClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. 
+// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: BackupJob.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. +func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. 
+func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) diff --git a/backup/go.mod b/backup/go.mod new file mode 100644 index 000000000..f29d205f7 --- /dev/null +++ b/backup/go.mod @@ -0,0 +1,62 @@ +module github.com/Wei-Shaw/sub2api/backup + +go 1.25.7 + +require ( + entgo.io/ent v0.14.5 + github.com/aws/aws-sdk-go-v2 v1.41.2 + github.com/aws/aws-sdk-go-v2/config v1.32.10 + github.com/aws/aws-sdk-go-v2/credentials v1.19.10 + github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.3 + github.com/aws/aws-sdk-go-v2/service/s3 v1.96.1 + github.com/stretchr/testify v1.8.4 + google.golang.org/grpc v1.75.1 + google.golang.org/protobuf v1.36.6 + modernc.org/sqlite v1.44.3 +) + +require ( + ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 // indirect + github.com/agext/levenshtein v1.2.3 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18 // indirect + github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect + github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.9 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18 // indirect + github.com/aws/aws-sdk-go-v2/service/signin v1.0.6 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.30.11 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.41.7 // indirect + 
github.com/aws/smithy-go v1.24.1 // indirect + github.com/bmatcuk/doublestar v1.3.4 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/go-openapi/inflect v0.19.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/hashicorp/hcl/v2 v2.18.1 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/ncruces/go-strftime v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/rogpeppe/go-internal v1.14.1 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect + github.com/zclconf/go-cty-yaml v1.1.0 // indirect + golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.46.0 // indirect + golang.org/x/sys v0.37.0 // indirect + golang.org/x/text v0.30.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + modernc.org/libc v1.67.6 // indirect + modernc.org/mathutil v1.7.1 // indirect + modernc.org/memory v1.11.0 // indirect +) diff --git a/backup/go.sum b/backup/go.sum new file mode 100644 index 000000000..a72f7c295 --- /dev/null +++ b/backup/go.sum @@ -0,0 +1,174 @@ +ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 h1:E0wvcUXTkgyN4wy4LGtNzMNGMytJN8afmIWXJVMi4cc= +ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w= +entgo.io/ent v0.14.5 h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4= +entgo.io/ent v0.14.5/go.mod h1:zTzLmWtPvGpmSwtkaayM2cm5m819NdM7z7tYPq3vN0U= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= 
+github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= +github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/aws/aws-sdk-go-v2 v1.41.2 h1:LuT2rzqNQsauaGkPK/7813XxcZ3o3yePY0Iy891T2ls= +github.com/aws/aws-sdk-go-v2 v1.41.2/go.mod h1:IvvlAZQXvTXznUPfRVfryiG1fbzE2NGK6m9u39YQ+S4= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5 h1:zWFmPmgw4sveAYi1mRqG+E/g0461cJ5M4bJ8/nc6d3Q= +github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5/go.mod h1:nVUlMLVV8ycXSb7mSkcNu9e3v/1TJq2RTlrPwhYWr5c= +github.com/aws/aws-sdk-go-v2/config v1.32.10 h1:9DMthfO6XWZYLfzZglAgW5Fyou2nRI5CuV44sTedKBI= +github.com/aws/aws-sdk-go-v2/config v1.32.10/go.mod h1:2rUIOnA2JaiqYmSKYmRJlcMWy6qTj1vuRFscppSBMcw= +github.com/aws/aws-sdk-go-v2/credentials v1.19.10 h1:EEhmEUFCE1Yhl7vDhNOI5OCL/iKMdkkYFTRpZXNw7m8= +github.com/aws/aws-sdk-go-v2/credentials v1.19.10/go.mod h1:RnnlFCAlxQCkN2Q379B67USkBMu1PipEEiibzYN5UTE= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 h1:Ii4s+Sq3yDfaMLpjrJsqD6SmG/Wq/P5L/hw2qa78UAY= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18/go.mod h1:6x81qnY++ovptLE6nWQeWrpXxbnlIex+4H4eYYGcqfc= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.3 h1:+mQ8NQBh7B7c2FBtppRnwkrmuwFON1XQQ+5yblomZKk= +github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.3/go.mod h1:u67RKh3BRmS4FYLH+rN3N4T5fqpd9m2ttAwBJYEdosU= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18 h1:F43zk1vemYIqPAwhjTjYIz0irU2EY7sOb/F5eJ3HuyM= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18/go.mod h1:w1jdlZXrGKaJcNoL+Nnrj+k5wlpGXqnNrKoP22HvAug= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18 h1:xCeWVjj0ki0l3nruoyP2slHsGArMxeiiaoPN5QZH6YQ= 
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18/go.mod h1:r/eLGuGCBw6l36ZRWiw6PaZwPXb6YOj+i/7MizNl5/k= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= +github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18 h1:eZioDaZGJ0tMM4gzmkNIO2aAoQd+je7Ug7TkvAzlmkU= +github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18/go.mod h1:CCXwUKAJdoWr6/NcxZ+zsiPr6oH/Q5aTooRGYieAyj4= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 h1:CeY9LUdur+Dxoeldqoun6y4WtJ3RQtzk0JMP2gfUay0= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5/go.mod h1:AZLZf2fMaahW5s/wMRciu1sYbdsikT/UHwbUjOdEVTc= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.9 h1:IJRzQTvdpjHRPItx9gzNcz7Y1F+xqAR+xiy9rr5ZYl8= +github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.9/go.mod h1:Kzm5e6OmNH8VMkgK9t+ry5jEih4Y8whqs+1hrkxim1I= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18 h1:LTRCYFlnnKFlKsyIQxKhJuDuA3ZkrDQMRYm6rXiHlLY= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18/go.mod h1:XhwkgGG6bHSd00nO/mexWTcTjgd6PjuvWQMqSn2UaEk= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18 h1:/A/xDuZAVD2BpsS2fftFRo/NoEKQJ8YTnJDEHBy2Gtg= +github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18/go.mod h1:hWe9b4f+djUQGmyiGEeOnZv69dtMSgpDRIvNMvuvzvY= +github.com/aws/aws-sdk-go-v2/service/s3 v1.96.1 h1:giB30dEeoar5bgDnkE0q+z7cFjcHaCjulpmPVmuKR84= +github.com/aws/aws-sdk-go-v2/service/s3 v1.96.1/go.mod h1:071TH4M3botFLWDbzQLfBR7tXYi7Fs2RsXSiH7nlUlY= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.6 h1:MzORe+J94I+hYu2a6XmV5yC9huoTv8NRcCrUNedDypQ= +github.com/aws/aws-sdk-go-v2/service/signin v1.0.6/go.mod h1:hXzcHLARD7GeWnifd8j9RWqtfIgxj4/cAtIVIK7hg8g= +github.com/aws/aws-sdk-go-v2/service/sso v1.30.11 h1:7oGD8KPfBOJGXiCoRKrrrQkbvCp8N++u36hrLMPey6o= 
+github.com/aws/aws-sdk-go-v2/service/sso v1.30.11/go.mod h1:0DO9B5EUJQlIDif+XJRWCljZRKsAFKh3gpFz7UnDtOo= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15 h1:edCcNp9eGIUDUCrzoCu1jWAXLGFIizeqkdkKgRlJwWc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15/go.mod h1:lyRQKED9xWfgkYC/wmmYfv7iVIM68Z5OQ88ZdcV1QbU= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.7 h1:NITQpgo9A5NrDZ57uOWj+abvXSb83BbyggcUBVksN7c= +github.com/aws/aws-sdk-go-v2/service/sts v1.41.7/go.mod h1:sks5UWBhEuWYDPdwlnRFn1w7xWdH29Jcpe+/PJQefEs= +github.com/aws/smithy-go v1.24.1 h1:VbyeNfmYkWoxMVpGUAbQumkODcYmfMRfZ8yQiH30SK0= +github.com/aws/smithy-go v1.24.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= +github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= +github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= 
+github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= +github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= +github.com/hashicorp/hcl/v2 v2.18.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= +github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= +github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap 
v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= +github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= +github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= +github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= +github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= +github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/otel v1.37.0 
h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= +go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= +go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= +go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= +go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= +go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= +go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= +go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= +go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= +golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= +golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= +golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= +golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= +golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= +golang.org/x/tools v0.38.0 
h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= +google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= +gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= +modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= +modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc= +modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM= +modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA= +modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= +modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= +modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= 
+modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE= +modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY= +modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= +modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= +modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI= +modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE= +modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= +modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= +modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= +modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= +modernc.org/opt v0.1.4 h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= +modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= +modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= +modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= +modernc.org/sqlite v1.44.3 h1:+39JvV/HWMcYslAwRxHb8067w+2zowvFOUrOWIy9PjY= +modernc.org/sqlite v1.44.3/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA= +modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= +modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/backup/internal/artifact/doc.go b/backup/internal/artifact/doc.go new file mode 100644 index 000000000..c146142de --- /dev/null +++ b/backup/internal/artifact/doc.go @@ -0,0 +1 @@ +package artifact diff --git a/backup/internal/config/doc.go b/backup/internal/config/doc.go new file mode 100644 index 000000000..d912156be --- /dev/null +++ b/backup/internal/config/doc.go @@ -0,0 +1 @@ +package config diff --git 
a/backup/internal/executor/doc.go b/backup/internal/executor/doc.go new file mode 100644 index 000000000..be0ba73e6 --- /dev/null +++ b/backup/internal/executor/doc.go @@ -0,0 +1 @@ +package executor diff --git a/backup/internal/executor/runner.go b/backup/internal/executor/runner.go new file mode 100644 index 000000000..e55c641ad --- /dev/null +++ b/backup/internal/executor/runner.go @@ -0,0 +1,755 @@ +package executor + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/Wei-Shaw/sub2api/backup/ent" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/internal/s3client" + "github.com/Wei-Shaw/sub2api/backup/internal/store/entstore" +) + +const ( + defaultPollInterval = 5 * time.Second + defaultRunTimeout = 30 * time.Minute + defaultEventTimeout = 2 * time.Second + defaultRootDirectory = "/var/lib/sub2api/backups" +) + +type Options struct { + PollInterval time.Duration + RunTimeout time.Duration + Logger *log.Logger +} + +type Runner struct { + store *entstore.Store + pollInterval time.Duration + runTimeout time.Duration + logger *log.Logger + + notifyCh chan struct{} + stopCh chan struct{} + doneCh chan struct{} + + startOnce sync.Once + stopOnce sync.Once +} + +type runResult struct { + Artifact *entstore.BackupArtifactSnapshot + S3Object *entstore.BackupS3ObjectSnapshot + PartialErr error +} + +type generatedFile struct { + ArchiveName string `json:"archive_name"` + LocalPath string `json:"local_path"` + SizeBytes int64 `json:"size_bytes"` + SHA256 string `json:"sha256"` +} + +type bundleManifest struct { + JobID string `json:"job_id"` + BackupType string `json:"backup_type"` + SourceMode string `json:"source_mode"` + CreatedAt string `json:"created_at"` + Files []generatedFile `json:"files"` +} + +func NewRunner(store 
*entstore.Store, opts Options) *Runner { + poll := opts.PollInterval + if poll <= 0 { + poll = defaultPollInterval + } + runTimeout := opts.RunTimeout + if runTimeout <= 0 { + runTimeout = defaultRunTimeout + } + logger := opts.Logger + if logger == nil { + logger = log.New(os.Stdout, "[backupd-executor] ", log.LstdFlags) + } + + return &Runner{ + store: store, + pollInterval: poll, + runTimeout: runTimeout, + logger: logger, + notifyCh: make(chan struct{}, 1), + stopCh: make(chan struct{}), + doneCh: make(chan struct{}), + } +} + +func (r *Runner) Start() error { + if r == nil || r.store == nil { + return errors.New("executor store is required") + } + + var startErr error + r.startOnce.Do(func() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + requeued, err := r.store.RequeueRunningJobs(ctx) + if err != nil { + startErr = err + return + } + if requeued > 0 { + r.logger.Printf("requeued %d running jobs after restart", requeued) + } + + go r.loop() + r.Notify() + }) + return startErr +} + +func (r *Runner) Stop(ctx context.Context) error { + if r == nil { + return nil + } + r.stopOnce.Do(func() { + close(r.stopCh) + }) + + if ctx == nil { + <-r.doneCh + return nil + } + + select { + case <-r.doneCh: + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +func (r *Runner) Notify() { + if r == nil { + return + } + select { + case r.notifyCh <- struct{}{}: + default: + } +} + +func (r *Runner) loop() { + defer close(r.doneCh) + + ticker := time.NewTicker(r.pollInterval) + defer ticker.Stop() + + for { + select { + case <-r.notifyCh: + r.processQueuedJobs() + case <-ticker.C: + r.processQueuedJobs() + case <-r.stopCh: + return + } + } +} + +func (r *Runner) processQueuedJobs() { + for { + select { + case <-r.stopCh: + return + default: + } + + ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + job, err := r.store.AcquireNextQueuedJob(ctx) + cancel() + if err != nil { + if ent.IsNotFound(err) { + 
return + } + r.logger.Printf("acquire queued job failed: %v", err) + return + } + + r.executeJob(job) + } +} + +func (r *Runner) executeJob(job *ent.BackupJob) { + if job == nil { + return + } + + r.logEvent(job.JobID, "info", "worker", "job picked by executor", "") + + ctx, cancel := context.WithTimeout(context.Background(), r.runTimeout) + defer cancel() + + result, err := r.run(ctx, job) + finishInput := entstore.FinishBackupJobInput{ + JobID: job.JobID, + Status: backupjob.StatusFailed.String(), + } + + if err != nil { + r.logger.Printf("job %s failed: %v", job.JobID, err) + finishInput.ErrorMessage = shortenError(err) + } else { + finishInput.Artifact = result.Artifact + finishInput.S3Object = result.S3Object + switch { + case result.PartialErr != nil: + finishInput.Status = backupjob.StatusPartialSucceeded.String() + finishInput.ErrorMessage = shortenError(result.PartialErr) + default: + finishInput.Status = backupjob.StatusSucceeded.String() + } + } + + finishCtx, finishCancel := context.WithTimeout(context.Background(), 5*time.Second) + defer finishCancel() + if _, finishErr := r.store.FinishBackupJob(finishCtx, finishInput); finishErr != nil { + r.logger.Printf("job %s finish update failed: %v", job.JobID, finishErr) + } +} + +func (r *Runner) run(ctx context.Context, job *ent.BackupJob) (*runResult, error) { + cfg, err := r.store.GetConfig(ctx) + if err != nil { + return nil, fmt.Errorf("load config failed: %w", err) + } + + backupRoot := normalizeBackupRoot(cfg.BackupRoot) + jobDir := filepath.Join( + backupRoot, + time.Now().UTC().Format("2006"), + time.Now().UTC().Format("01"), + time.Now().UTC().Format("02"), + job.JobID, + ) + if err := os.MkdirAll(jobDir, 0o750); err != nil { + return nil, fmt.Errorf("create backup directory failed: %w", err) + } + + generated := make([]generatedFile, 0, 4) + backupType := strings.TrimSpace(job.BackupType.String()) + + if backupType == backupjob.BackupTypePostgres.String() || backupType == 
backupjob.BackupTypeFull.String() { + postgresPath := filepath.Join(jobDir, "postgres.dump") + if err := runPostgresBackup(ctx, cfg, postgresPath); err != nil { + return nil, fmt.Errorf("postgres backup failed: %w", err) + } + gf, err := buildGeneratedFile("postgres.dump", postgresPath) + if err != nil { + return nil, err + } + generated = append(generated, gf) + r.logEvent(job.JobID, "info", "artifact", "postgres backup finished", "") + } + + if backupType == backupjob.BackupTypeRedis.String() || backupType == backupjob.BackupTypeFull.String() { + redisPath := filepath.Join(jobDir, "redis.rdb") + if err := runRedisBackup(ctx, cfg, redisPath, job.JobID); err != nil { + return nil, fmt.Errorf("redis backup failed: %w", err) + } + gf, err := buildGeneratedFile("redis.rdb", redisPath) + if err != nil { + return nil, err + } + generated = append(generated, gf) + r.logEvent(job.JobID, "info", "artifact", "redis backup finished", "") + } + + manifest := bundleManifest{ + JobID: job.JobID, + BackupType: backupType, + SourceMode: strings.TrimSpace(cfg.SourceMode), + CreatedAt: time.Now().UTC().Format(time.RFC3339), + Files: generated, + } + manifestPath := filepath.Join(jobDir, "manifest.json") + if err := writeManifest(manifestPath, manifest); err != nil { + return nil, fmt.Errorf("write manifest failed: %w", err) + } + manifestGenerated, err := buildGeneratedFile("manifest.json", manifestPath) + if err != nil { + return nil, err + } + generated = append(generated, manifestGenerated) + + bundlePath := filepath.Join(jobDir, "bundle.tar.gz") + if err := writeBundle(bundlePath, generated); err != nil { + return nil, fmt.Errorf("build bundle failed: %w", err) + } + bundleSize, bundleSHA, err := fileDigest(bundlePath) + if err != nil { + return nil, fmt.Errorf("bundle hash failed: %w", err) + } + r.logEvent(job.JobID, "info", "artifact", "bundle generated", "") + + result := &runResult{ + Artifact: &entstore.BackupArtifactSnapshot{ + LocalPath: bundlePath, + SizeBytes: 
bundleSize, + SHA256: bundleSHA, + }, + } + + if job.UploadToS3 { + r.logEvent(job.JobID, "info", "s3", "start upload to s3", "") + s3Object, uploadErr := uploadToS3(ctx, cfg, job.JobID, bundlePath) + if uploadErr != nil { + result.PartialErr = fmt.Errorf("upload s3 failed: %w", uploadErr) + r.logEvent(job.JobID, "warning", "s3", "upload to s3 failed", shortenError(uploadErr)) + } else { + result.S3Object = s3Object + r.logEvent(job.JobID, "info", "s3", "upload to s3 finished", "") + } + } + + if err := applyRetentionPolicy(ctx, r.store, cfg); err != nil { + r.logger.Printf("retention cleanup failed: %v", err) + } + + return result, nil +} + +func runPostgresBackup(ctx context.Context, cfg *entstore.ConfigSnapshot, destination string) error { + if cfg == nil { + return errors.New("config is nil") + } + mode := normalizeSourceMode(cfg.SourceMode) + pg := cfg.Postgres + host := defaultIfBlank(pg.Host, "127.0.0.1") + port := pg.Port + if port <= 0 { + port = 5432 + } + user := defaultIfBlank(pg.User, "postgres") + database := strings.TrimSpace(pg.Database) + if database == "" { + return errors.New("postgres.database is required") + } + + baseArgs := []string{ + "-h", host, + "-p", strconv.Itoa(int(port)), + "-U", user, + "-d", database, + "--format=custom", + "--no-owner", + "--no-privileges", + } + if strings.TrimSpace(pg.SSLMode) != "" { + baseArgs = append(baseArgs, "--sslmode", strings.TrimSpace(pg.SSLMode)) + } + + switch mode { + case "direct": + args := append([]string{}, baseArgs...) 
+ args = append(args, "--file", destination) + env := []string{} + if strings.TrimSpace(pg.Password) != "" { + env = append(env, "PGPASSWORD="+strings.TrimSpace(pg.Password)) + } + return runCommand(ctx, "pg_dump", args, env, nil) + case "docker_exec": + container := strings.TrimSpace(pg.ContainerName) + if container == "" { + return errors.New("postgres.container_name is required in docker_exec mode") + } + outputFile, err := os.OpenFile(destination, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o640) + if err != nil { + return err + } + defer func() { + _ = outputFile.Close() + }() + + args := []string{"exec"} + if strings.TrimSpace(pg.Password) != "" { + args = append(args, "-e", "PGPASSWORD="+strings.TrimSpace(pg.Password)) + } + args = append(args, container, "pg_dump") + args = append(args, baseArgs...) + return runCommand(ctx, "docker", args, nil, outputFile) + default: + return fmt.Errorf("unsupported source_mode: %s", mode) + } +} + +func runRedisBackup(ctx context.Context, cfg *entstore.ConfigSnapshot, destination, jobID string) error { + if cfg == nil { + return errors.New("config is nil") + } + mode := normalizeSourceMode(cfg.SourceMode) + redisCfg := cfg.Redis + + host, port := parseRedisAddr(redisCfg.Addr) + baseArgs := []string{} + if host != "" { + baseArgs = append(baseArgs, "-h", host) + } + if port > 0 { + baseArgs = append(baseArgs, "-p", strconv.Itoa(port)) + } + if strings.TrimSpace(redisCfg.Username) != "" { + baseArgs = append(baseArgs, "--user", strings.TrimSpace(redisCfg.Username)) + } + if redisCfg.DB >= 0 { + baseArgs = append(baseArgs, "-n", strconv.Itoa(int(redisCfg.DB))) + } + + env := []string{} + if strings.TrimSpace(redisCfg.Password) != "" { + env = append(env, "REDISCLI_AUTH="+strings.TrimSpace(redisCfg.Password)) + } + + switch mode { + case "direct": + args := append([]string{}, baseArgs...) 
+ args = append(args, "--rdb", destination) + return runCommand(ctx, "redis-cli", args, env, nil) + case "docker_exec": + container := strings.TrimSpace(redisCfg.ContainerName) + if container == "" { + return errors.New("redis.container_name is required in docker_exec mode") + } + tmpPath := fmt.Sprintf("/tmp/sub2api_%s.rdb", sanitizeFileName(jobID)) + + execArgs := []string{"exec"} + for _, item := range env { + execArgs = append(execArgs, "-e", item) + } + execArgs = append(execArgs, container, "redis-cli") + execArgs = append(execArgs, baseArgs...) + execArgs = append(execArgs, "--rdb", tmpPath) + if err := runCommand(ctx, "docker", execArgs, nil, nil); err != nil { + return err + } + + copyArgs := []string{"cp", container + ":" + tmpPath, destination} + if err := runCommand(ctx, "docker", copyArgs, nil, nil); err != nil { + _ = runCommand(ctx, "docker", []string{"exec", container, "rm", "-f", tmpPath}, nil, nil) + return err + } + _ = runCommand(ctx, "docker", []string{"exec", container, "rm", "-f", tmpPath}, nil, nil) + return nil + default: + return fmt.Errorf("unsupported source_mode: %s", mode) + } +} + +func uploadToS3(ctx context.Context, cfg *entstore.ConfigSnapshot, jobID, bundlePath string) (*entstore.BackupS3ObjectSnapshot, error) { + if cfg == nil { + return nil, errors.New("config is nil") + } + if !cfg.S3.Enabled { + return nil, errors.New("s3 is disabled") + } + if strings.TrimSpace(cfg.S3.Bucket) == "" { + return nil, errors.New("s3.bucket is required") + } + if strings.TrimSpace(cfg.S3.Region) == "" { + return nil, errors.New("s3.region is required") + } + + client, err := s3client.New(ctx, s3client.Config{ + Endpoint: strings.TrimSpace(cfg.S3.Endpoint), + Region: strings.TrimSpace(cfg.S3.Region), + AccessKeyID: strings.TrimSpace(cfg.S3.AccessKeyID), + SecretAccessKey: strings.TrimSpace(cfg.S3.SecretAccessKey), + Bucket: strings.TrimSpace(cfg.S3.Bucket), + Prefix: strings.Trim(strings.TrimSpace(cfg.S3.Prefix), "/"), + ForcePathStyle: 
cfg.S3.ForcePathStyle, + UseSSL: cfg.S3.UseSSL, + }) + if err != nil { + return nil, err + } + + now := time.Now().UTC() + key := joinS3Key( + client.Prefix(), + now.Format("2006"), + now.Format("01"), + now.Format("02"), + jobID, + filepath.Base(bundlePath), + ) + etag, err := client.UploadFile(ctx, bundlePath, key) + if err != nil { + return nil, err + } + return &entstore.BackupS3ObjectSnapshot{ + Bucket: client.Bucket(), + Key: key, + ETag: etag, + }, nil +} + +func applyRetentionPolicy(ctx context.Context, store *entstore.Store, cfg *entstore.ConfigSnapshot) error { + if store == nil || cfg == nil { + return nil + } + keepLast := int(cfg.KeepLast) + retentionDays := int(cfg.RetentionDays) + if keepLast <= 0 && retentionDays <= 0 { + return nil + } + + items, err := store.ListFinishedJobsForRetention(ctx) + if err != nil { + return err + } + if len(items) == 0 { + return nil + } + + threshold := time.Now().AddDate(0, 0, -retentionDays) + for idx, item := range items { + if item == nil { + continue + } + keepByCount := keepLast > 0 && idx < keepLast + keepByTime := false + if retentionDays > 0 { + reference := item.CreatedAt + if item.FinishedAt != nil { + reference = *item.FinishedAt + } + keepByTime = reference.After(threshold) + } + if keepByCount || keepByTime { + continue + } + + artifactPath := strings.TrimSpace(item.ArtifactLocalPath) + if artifactPath == "" { + continue + } + if err := os.RemoveAll(filepath.Dir(artifactPath)); err != nil && !errors.Is(err, os.ErrNotExist) { + return err + } + } + return nil +} + +func buildGeneratedFile(archiveName, path string) (generatedFile, error) { + size, sum, err := fileDigest(path) + if err != nil { + return generatedFile{}, err + } + return generatedFile{ + ArchiveName: archiveName, + LocalPath: path, + SizeBytes: size, + SHA256: sum, + }, nil +} + +func writeManifest(path string, manifest bundleManifest) error { + data, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return err + } + data = 
append(data, '\n') + return os.WriteFile(path, data, 0o640) +} + +func writeBundle(path string, files []generatedFile) error { + output, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o640) + if err != nil { + return err + } + defer func() { + _ = output.Close() + }() + + gzipWriter := gzip.NewWriter(output) + defer func() { + _ = gzipWriter.Close() + }() + + tarWriter := tar.NewWriter(gzipWriter) + defer func() { + _ = tarWriter.Close() + }() + + for _, file := range files { + info, err := os.Stat(file.LocalPath) + if err != nil { + return err + } + if !info.Mode().IsRegular() { + continue + } + + header, err := tar.FileInfoHeader(info, "") + if err != nil { + return err + } + header.Name = file.ArchiveName + if err := tarWriter.WriteHeader(header); err != nil { + return err + } + + reader, err := os.Open(file.LocalPath) + if err != nil { + return err + } + if _, err = io.Copy(tarWriter, reader); err != nil { + _ = reader.Close() + return err + } + _ = reader.Close() + } + return nil +} + +func fileDigest(path string) (int64, string, error) { + file, err := os.Open(path) + if err != nil { + return 0, "", err + } + defer func() { + _ = file.Close() + }() + + hash := sha256.New() + size, err := io.Copy(hash, file) + if err != nil { + return 0, "", err + } + return size, hex.EncodeToString(hash.Sum(nil)), nil +} + +func runCommand(ctx context.Context, name string, args []string, extraEnv []string, stdout io.Writer) error { + cmd := exec.CommandContext(ctx, name, args...) + cmd.Env = append(os.Environ(), extraEnv...) 
+ if stdout != nil { + cmd.Stdout = stdout + } + var stderr bytes.Buffer + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + errMsg := strings.TrimSpace(stderr.String()) + if errMsg == "" { + errMsg = err.Error() + } + return fmt.Errorf("%s command failed: %s", name, sanitizeError(errMsg)) + } + return nil +} + +func (r *Runner) logEvent(jobID, level, eventType, message, payload string) { + ctx, cancel := context.WithTimeout(context.Background(), defaultEventTimeout) + defer cancel() + if err := r.store.AppendJobEvent(ctx, jobID, level, eventType, message, payload); err != nil { + r.logger.Printf("append event failed, job=%s event=%s err=%v", jobID, eventType, err) + } +} + +func normalizeSourceMode(v string) string { + mode := strings.TrimSpace(v) + if mode == "" { + return "direct" + } + return mode +} + +func normalizeBackupRoot(root string) string { + trimmed := strings.TrimSpace(root) + if trimmed == "" { + return defaultRootDirectory + } + return trimmed +} + +func parseRedisAddr(addr string) (string, int) { + trimmed := strings.TrimSpace(addr) + if trimmed == "" { + return "127.0.0.1", 6379 + } + + host, portText, err := net.SplitHostPort(trimmed) + if err != nil { + return trimmed, 6379 + } + port, err := strconv.Atoi(portText) + if err != nil || port <= 0 { + return host, 6379 + } + return host, port +} + +func joinS3Key(parts ...string) string { + filtered := make([]string, 0, len(parts)) + for _, part := range parts { + p := strings.Trim(strings.TrimSpace(part), "/") + if p == "" { + continue + } + filtered = append(filtered, p) + } + return strings.Join(filtered, "/") +} + +func sanitizeFileName(v string) string { + trimmed := strings.TrimSpace(v) + if trimmed == "" { + return "job" + } + replacer := strings.NewReplacer("/", "_", "\\", "_", "..", "_", " ", "_") + return replacer.Replace(trimmed) +} + +func sanitizeError(v string) string { + out := strings.TrimSpace(v) + out = strings.ReplaceAll(out, "\n", " ") + out = strings.ReplaceAll(out, 
"\r", " ") + out = strings.TrimSpace(out) + if out == "" { + return "unknown error" + } + if len(out) > 512 { + return out[:512] + } + return out +} + +func shortenError(err error) string { + if err == nil { + return "" + } + return sanitizeError(err.Error()) +} + +func defaultIfBlank(v, fallback string) string { + if strings.TrimSpace(v) == "" { + return fallback + } + return strings.TrimSpace(v) +} diff --git a/backup/internal/executor/runner_test.go b/backup/internal/executor/runner_test.go new file mode 100644 index 000000000..349253fcc --- /dev/null +++ b/backup/internal/executor/runner_test.go @@ -0,0 +1,110 @@ +package executor + +import ( + "archive/tar" + "compress/gzip" + "io" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseRedisAddr(t *testing.T) { + t.Parallel() + + host, port := parseRedisAddr("127.0.0.1:6380") + require.Equal(t, "127.0.0.1", host) + require.Equal(t, 6380, port) + + host, port = parseRedisAddr("localhost") + require.Equal(t, "localhost", host) + require.Equal(t, 6379, port) + + host, port = parseRedisAddr("") + require.Equal(t, "127.0.0.1", host) + require.Equal(t, 6379, port) +} + +func TestJoinS3Key(t *testing.T) { + t.Parallel() + + require.Equal(t, "a/b/c", joinS3Key("/a/", "/b", "c/")) + require.Equal(t, "a/c", joinS3Key("a", "", "c")) + require.Equal(t, "", joinS3Key("", " ", "/")) +} + +func TestSanitizeError(t *testing.T) { + t.Parallel() + + msg := sanitizeError("line1\nline2\rline3") + require.Equal(t, "line1 line2 line3", msg) + + longMsg := sanitizeError(strings.Repeat("x", 600)) + require.Len(t, longMsg, 512) +} + +func TestWriteManifestAndBundle(t *testing.T) { + t.Parallel() + + workDir := t.TempDir() + fileAPath := filepath.Join(workDir, "postgres.dump") + fileBPath := filepath.Join(workDir, "redis.rdb") + require.NoError(t, os.WriteFile(fileAPath, []byte("postgres-data"), 0o640)) + require.NoError(t, os.WriteFile(fileBPath, []byte("redis-data"), 0o640)) + + 
fileA, err := buildGeneratedFile("postgres.dump", fileAPath) + require.NoError(t, err) + fileB, err := buildGeneratedFile("redis.rdb", fileBPath) + require.NoError(t, err) + + manifestPath := filepath.Join(workDir, "manifest.json") + require.NoError(t, writeManifest(manifestPath, bundleManifest{ + JobID: "bk_demo", + BackupType: "full", + SourceMode: "direct", + CreatedAt: "2026-01-01T00:00:00Z", + Files: []generatedFile{fileA, fileB}, + })) + manifestFile, err := buildGeneratedFile("manifest.json", manifestPath) + require.NoError(t, err) + + bundlePath := filepath.Join(workDir, "bundle.tar.gz") + require.NoError(t, writeBundle(bundlePath, []generatedFile{fileA, fileB, manifestFile})) + + entries, err := readTarEntries(bundlePath) + require.NoError(t, err) + require.Contains(t, entries, "postgres.dump") + require.Contains(t, entries, "redis.rdb") + require.Contains(t, entries, "manifest.json") +} + +func readTarEntries(bundlePath string) ([]string, error) { + file, err := os.Open(bundlePath) + if err != nil { + return nil, err + } + defer func() { _ = file.Close() }() + + gzReader, err := gzip.NewReader(file) + if err != nil { + return nil, err + } + defer func() { _ = gzReader.Close() }() + + tarReader := tar.NewReader(gzReader) + entries := make([]string, 0, 8) + for { + header, err := tarReader.Next() + if err == io.EOF { + break + } + if err != nil { + return nil, err + } + entries = append(entries, header.Name) + } + return entries, nil +} diff --git a/backup/internal/grpcserver/doc.go b/backup/internal/grpcserver/doc.go new file mode 100644 index 000000000..22ab05063 --- /dev/null +++ b/backup/internal/grpcserver/doc.go @@ -0,0 +1 @@ +package grpcserver diff --git a/backup/internal/grpcserver/interceptor.go b/backup/internal/grpcserver/interceptor.go new file mode 100644 index 000000000..c645746b2 --- /dev/null +++ b/backup/internal/grpcserver/interceptor.go @@ -0,0 +1,131 @@ +package grpcserver + +import ( + "context" + "fmt" + "log" + "strings" + "time" + + 
"google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +var defaultMethodTimeouts = map[string]time.Duration{ + "/backup.v1.BackupService/Health": 1 * time.Second, + "/backup.v1.BackupService/GetConfig": 2 * time.Second, + "/backup.v1.BackupService/ListBackupJobs": 2 * time.Second, + "/backup.v1.BackupService/GetBackupJob": 2 * time.Second, + "/backup.v1.BackupService/CreateBackupJob": 3 * time.Second, + "/backup.v1.BackupService/UpdateConfig": 5 * time.Second, + "/backup.v1.BackupService/ValidateS3": 5 * time.Second, +} + +func UnaryServerInterceptor(logger *log.Logger) grpc.UnaryServerInterceptor { + if logger == nil { + logger = log.Default() + } + + return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { + method := "" + if info != nil { + method = info.FullMethod + } + requestID := incomingRequestID(ctx) + + if requestID != "" { + _ = grpc.SetHeader(ctx, metadata.Pairs("x-request-id", requestID)) + } + + callCtx, cancel := applyMethodTimeout(ctx, method) + defer cancel() + + start := time.Now() + defer func() { + if recovered := recover(); recovered != nil { + err = status.Error(codes.Internal, "panic recovered") + logger.Printf( + "[backupd-grpc] request_id=%s method=%s code=%s duration_ms=%d panic=%q", + requestID, + method, + codes.Internal.String(), + time.Since(start).Milliseconds(), + sanitizeLogValue(fmt.Sprint(recovered)), + ) + return + } + + err = normalizeGRPCError(err) + logger.Printf( + "[backupd-grpc] request_id=%s method=%s code=%s duration_ms=%d err=%q", + requestID, + method, + status.Code(err).String(), + time.Since(start).Milliseconds(), + sanitizeLogValue(status.Convert(err).Message()), + ) + }() + + resp, err = handler(callCtx, req) + return resp, err + } +} + +func applyMethodTimeout(ctx context.Context, method string) (context.Context, context.CancelFunc) { + timeout, ok := 
defaultMethodTimeouts[method] + if !ok || timeout <= 0 { + return context.WithCancel(ctx) + } + + if deadline, hasDeadline := ctx.Deadline(); hasDeadline { + if remaining := time.Until(deadline); remaining > 0 && remaining <= timeout { + return context.WithCancel(ctx) + } + } + return context.WithTimeout(ctx, timeout) +} + +func incomingRequestID(ctx context.Context) string { + md, ok := metadata.FromIncomingContext(ctx) + if !ok { + return "" + } + + for _, key := range []string{"x-request-id", "request-id", "x_request_id"} { + values := md.Get(key) + if len(values) == 0 { + continue + } + value := strings.TrimSpace(values[0]) + if value != "" { + return value + } + } + return "" +} + +func normalizeGRPCError(err error) error { + if err == nil { + return nil + } + + if _, ok := status.FromError(err); ok { + return err + } + return status.Error(codes.Internal, sanitizeLogValue(err.Error())) +} + +func sanitizeLogValue(value string) string { + normalized := strings.TrimSpace(value) + normalized = strings.ReplaceAll(normalized, "\n", " ") + normalized = strings.ReplaceAll(normalized, "\r", " ") + if normalized == "" { + return "-" + } + if len(normalized) > 512 { + return normalized[:512] + } + return normalized +} diff --git a/backup/internal/grpcserver/interceptor_test.go b/backup/internal/grpcserver/interceptor_test.go new file mode 100644 index 000000000..cdcaa4604 --- /dev/null +++ b/backup/internal/grpcserver/interceptor_test.go @@ -0,0 +1,50 @@ +package grpcserver + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/require" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +func TestIncomingRequestID(t *testing.T) { + t.Parallel() + + ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("x-request-id", "req-123")) + require.Equal(t, "req-123", incomingRequestID(ctx)) +} + +func TestNormalizeGRPCError(t *testing.T) { + t.Parallel() + + grpcErr := 
status.Error(codes.InvalidArgument, "bad") + require.Equal(t, grpcErr, normalizeGRPCError(grpcErr)) + + plain := normalizeGRPCError(errors.New("plain error")) + require.Equal(t, codes.Internal, status.Code(plain)) + require.Contains(t, status.Convert(plain).Message(), "plain error") +} + +func TestApplyMethodTimeout(t *testing.T) { + t.Parallel() + + ctx := context.Background() + callCtx, cancel := applyMethodTimeout(ctx, "/backup.v1.BackupService/Health") + defer cancel() + deadline, ok := callCtx.Deadline() + require.True(t, ok) + require.WithinDuration(t, time.Now().Add(1*time.Second), deadline, 200*time.Millisecond) + + shortCtx, shortCancel := context.WithTimeout(context.Background(), 200*time.Millisecond) + defer shortCancel() + callCtx2, cancel2 := applyMethodTimeout(shortCtx, "/backup.v1.BackupService/UpdateConfig") + defer cancel2() + deadline2, ok2 := callCtx2.Deadline() + require.True(t, ok2) + require.WithinDuration(t, time.Now().Add(200*time.Millisecond), deadline2, 200*time.Millisecond) +} diff --git a/backup/internal/grpcserver/server.go b/backup/internal/grpcserver/server.go new file mode 100644 index 000000000..107906b28 --- /dev/null +++ b/backup/internal/grpcserver/server.go @@ -0,0 +1,342 @@ +package grpcserver + +import ( + "context" + "errors" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/backup/ent" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/internal/s3client" + "github.com/Wei-Shaw/sub2api/backup/internal/store/entstore" + backupv1 "github.com/Wei-Shaw/sub2api/backup/proto/backup/v1" + "github.com/aws/aws-sdk-go-v2/aws" + "github.com/aws/aws-sdk-go-v2/service/s3" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type Server struct { + backupv1.UnimplementedBackupServiceServer + store *entstore.Store + startedAt time.Time + version string + notifier queueNotifier +} + +type queueNotifier interface { + Notify() +} + +func New(store *entstore.Store, version string, 
notifier queueNotifier) *Server { + if strings.TrimSpace(version) == "" { + version = "dev" + } + return &Server{ + store: store, + startedAt: time.Now(), + version: version, + notifier: notifier, + } +} + +func (s *Server) Health(_ context.Context, _ *backupv1.HealthRequest) (*backupv1.HealthResponse, error) { + return &backupv1.HealthResponse{ + Status: "SERVING", + Version: s.version, + UptimeSeconds: int64(time.Since(s.startedAt).Seconds()), + }, nil +} + +func (s *Server) GetConfig(ctx context.Context, _ *backupv1.GetConfigRequest) (*backupv1.GetConfigResponse, error) { + cfg, err := s.store.GetConfig(ctx) + if err != nil { + return nil, status.Errorf(codes.Internal, "load config failed: %v", err) + } + return &backupv1.GetConfigResponse{Config: toProtoConfig(cfg)}, nil +} + +func (s *Server) UpdateConfig(ctx context.Context, req *backupv1.UpdateConfigRequest) (*backupv1.UpdateConfigResponse, error) { + if req == nil || req.GetConfig() == nil { + return nil, status.Error(codes.InvalidArgument, "config is required") + } + cfg := req.GetConfig() + if err := validateConfig(cfg); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + updated, err := s.store.UpdateConfig(ctx, fromProtoConfig(cfg)) + if err != nil { + return nil, status.Errorf(codes.Internal, "update config failed: %v", err) + } + return &backupv1.UpdateConfigResponse{Config: toProtoConfig(updated)}, nil +} + +func (s *Server) ValidateS3(ctx context.Context, req *backupv1.ValidateS3Request) (*backupv1.ValidateS3Response, error) { + if req == nil || req.GetS3() == nil { + return nil, status.Error(codes.InvalidArgument, "s3 config is required") + } + s3Cfg := req.GetS3() + if strings.TrimSpace(s3Cfg.GetBucket()) == "" { + return nil, status.Error(codes.InvalidArgument, "s3.bucket is required") + } + if strings.TrimSpace(s3Cfg.GetRegion()) == "" { + return nil, status.Error(codes.InvalidArgument, "s3.region is required") + } + + client, err := s3client.New(ctx, 
s3client.Config{ + Endpoint: strings.TrimSpace(s3Cfg.GetEndpoint()), + Region: strings.TrimSpace(s3Cfg.GetRegion()), + AccessKeyID: strings.TrimSpace(s3Cfg.GetAccessKeyId()), + SecretAccessKey: strings.TrimSpace(s3Cfg.GetSecretAccessKey()), + Bucket: strings.TrimSpace(s3Cfg.GetBucket()), + Prefix: strings.Trim(strings.TrimSpace(s3Cfg.GetPrefix()), "/"), + ForcePathStyle: s3Cfg.GetForcePathStyle(), + UseSSL: s3Cfg.GetUseSsl(), + }) + if err != nil { + return &backupv1.ValidateS3Response{Ok: false, Message: err.Error()}, nil + } + + _, err = client.Raw().HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String(strings.TrimSpace(s3Cfg.GetBucket()))}) + if err != nil { + return &backupv1.ValidateS3Response{Ok: false, Message: err.Error()}, nil + } + return &backupv1.ValidateS3Response{Ok: true, Message: "ok"}, nil +} + +func (s *Server) CreateBackupJob(ctx context.Context, req *backupv1.CreateBackupJobRequest) (*backupv1.CreateBackupJobResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "request is required") + } + backupType := strings.TrimSpace(req.GetBackupType()) + if !isValidBackupType(backupType) { + return nil, status.Error(codes.InvalidArgument, "invalid backup_type") + } + + job, created, err := s.store.CreateBackupJob(ctx, entstore.CreateBackupJobInput{ + BackupType: backupType, + UploadToS3: req.GetUploadToS3(), + TriggeredBy: strings.TrimSpace(req.GetTriggeredBy()), + IdempotencyKey: strings.TrimSpace(req.GetIdempotencyKey()), + }) + if err != nil { + return nil, status.Errorf(codes.Internal, "create backup job failed: %v", err) + } + if created && s.notifier != nil { + s.notifier.Notify() + } + return &backupv1.CreateBackupJobResponse{Job: toProtoJob(job)}, nil +} + +func (s *Server) ListBackupJobs(ctx context.Context, req *backupv1.ListBackupJobsRequest) (*backupv1.ListBackupJobsResponse, error) { + if req == nil { + req = &backupv1.ListBackupJobsRequest{} + } + statusFilter := strings.TrimSpace(req.GetStatus()) + if 
statusFilter != "" && !isValidBackupStatus(statusFilter) { + return nil, status.Error(codes.InvalidArgument, "invalid status filter") + } + backupType := strings.TrimSpace(req.GetBackupType()) + if backupType != "" && !isValidBackupType(backupType) { + return nil, status.Error(codes.InvalidArgument, "invalid backup_type filter") + } + + out, err := s.store.ListBackupJobs(ctx, entstore.ListBackupJobsInput{ + PageSize: req.GetPageSize(), + PageToken: req.GetPageToken(), + Status: statusFilter, + BackupType: backupType, + }) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "list backup jobs failed: %v", err) + } + + items := make([]*backupv1.BackupJob, 0, len(out.Items)) + for _, item := range out.Items { + items = append(items, toProtoJob(item)) + } + return &backupv1.ListBackupJobsResponse{Items: items, NextPageToken: out.NextPageToken}, nil +} + +func (s *Server) GetBackupJob(ctx context.Context, req *backupv1.GetBackupJobRequest) (*backupv1.GetBackupJobResponse, error) { + if req == nil || strings.TrimSpace(req.GetJobId()) == "" { + return nil, status.Error(codes.InvalidArgument, "job_id is required") + } + job, err := s.store.GetBackupJob(ctx, req.GetJobId()) + if err != nil { + if ent.IsNotFound(err) { + return nil, status.Error(codes.NotFound, "backup job not found") + } + return nil, status.Errorf(codes.Internal, "get backup job failed: %v", err) + } + return &backupv1.GetBackupJobResponse{Job: toProtoJob(job)}, nil +} + +func validateConfig(cfg *backupv1.BackupConfig) error { + sourceMode := strings.TrimSpace(cfg.GetSourceMode()) + if sourceMode != "direct" && sourceMode != "docker_exec" { + return errors.New("source_mode must be direct or docker_exec") + } + if strings.TrimSpace(cfg.GetBackupRoot()) == "" { + return errors.New("backup_root is required") + } + if cfg.GetRetentionDays() <= 0 { + return errors.New("retention_days must be > 0") + } + if cfg.GetKeepLast() <= 0 { + return errors.New("keep_last must be > 0") + } + if 
cfg.GetPostgres() == nil { + return errors.New("postgres config is required") + } + if cfg.GetRedis() == nil { + return errors.New("redis config is required") + } + if cfg.GetS3() == nil { + return errors.New("s3 config is required") + } + return nil +} + +func isValidBackupType(v string) bool { + switch v { + case backupjob.BackupTypePostgres.String(), backupjob.BackupTypeRedis.String(), backupjob.BackupTypeFull.String(): + return true + default: + return false + } +} + +func isValidBackupStatus(v string) bool { + switch v { + case backupjob.StatusQueued.String(), + backupjob.StatusRunning.String(), + backupjob.StatusSucceeded.String(), + backupjob.StatusFailed.String(), + backupjob.StatusPartialSucceeded.String(): + return true + default: + return false + } +} + +func fromProtoConfig(cfg *backupv1.BackupConfig) entstore.ConfigSnapshot { + postgres := cfg.GetPostgres() + redis := cfg.GetRedis() + s3Cfg := cfg.GetS3() + return entstore.ConfigSnapshot{ + SourceMode: strings.TrimSpace(cfg.GetSourceMode()), + BackupRoot: strings.TrimSpace(cfg.GetBackupRoot()), + SQLitePath: strings.TrimSpace(cfg.GetSqlitePath()), + RetentionDays: cfg.GetRetentionDays(), + KeepLast: cfg.GetKeepLast(), + Postgres: entstore.SourceConfig{ + Host: strings.TrimSpace(postgres.GetHost()), + Port: postgres.GetPort(), + User: strings.TrimSpace(postgres.GetUser()), + Password: strings.TrimSpace(postgres.GetPassword()), + Database: strings.TrimSpace(postgres.GetDatabase()), + SSLMode: strings.TrimSpace(postgres.GetSslMode()), + ContainerName: strings.TrimSpace(postgres.GetContainerName()), + }, + Redis: entstore.SourceConfig{ + Addr: strings.TrimSpace(redis.GetAddr()), + Username: strings.TrimSpace(redis.GetUsername()), + Password: strings.TrimSpace(redis.GetPassword()), + DB: redis.GetDb(), + ContainerName: strings.TrimSpace(redis.GetContainerName()), + }, + S3: entstore.S3Config{ + Enabled: s3Cfg.GetEnabled(), + Endpoint: strings.TrimSpace(s3Cfg.GetEndpoint()), + Region: 
strings.TrimSpace(s3Cfg.GetRegion()), + Bucket: strings.TrimSpace(s3Cfg.GetBucket()), + AccessKeyID: strings.TrimSpace(s3Cfg.GetAccessKeyId()), + SecretAccessKey: strings.TrimSpace(s3Cfg.GetSecretAccessKey()), + Prefix: strings.Trim(strings.TrimSpace(s3Cfg.GetPrefix()), "/"), + ForcePathStyle: s3Cfg.GetForcePathStyle(), + UseSSL: s3Cfg.GetUseSsl(), + }, + } +} + +func toProtoConfig(cfg *entstore.ConfigSnapshot) *backupv1.BackupConfig { + if cfg == nil { + return &backupv1.BackupConfig{} + } + return &backupv1.BackupConfig{ + SourceMode: cfg.SourceMode, + BackupRoot: cfg.BackupRoot, + SqlitePath: cfg.SQLitePath, + RetentionDays: cfg.RetentionDays, + KeepLast: cfg.KeepLast, + Postgres: &backupv1.SourceConfig{ + Host: cfg.Postgres.Host, + Port: cfg.Postgres.Port, + User: cfg.Postgres.User, + Password: cfg.Postgres.Password, + Database: cfg.Postgres.Database, + SslMode: cfg.Postgres.SSLMode, + ContainerName: cfg.Postgres.ContainerName, + }, + Redis: &backupv1.SourceConfig{ + Addr: cfg.Redis.Addr, + Username: cfg.Redis.Username, + Password: cfg.Redis.Password, + Db: cfg.Redis.DB, + ContainerName: cfg.Redis.ContainerName, + }, + S3: &backupv1.S3Config{ + Enabled: cfg.S3.Enabled, + Endpoint: cfg.S3.Endpoint, + Region: cfg.S3.Region, + Bucket: cfg.S3.Bucket, + AccessKeyId: cfg.S3.AccessKeyID, + SecretAccessKey: cfg.S3.SecretAccessKey, + Prefix: cfg.S3.Prefix, + ForcePathStyle: cfg.S3.ForcePathStyle, + UseSsl: cfg.S3.UseSSL, + }, + } +} + +func toProtoJob(job *ent.BackupJob) *backupv1.BackupJob { + if job == nil { + return &backupv1.BackupJob{} + } + out := &backupv1.BackupJob{ + JobId: job.JobID, + BackupType: job.BackupType.String(), + Status: job.Status.String(), + TriggeredBy: job.TriggeredBy, + IdempotencyKey: job.IdempotencyKey, + UploadToS3: job.UploadToS3, + ErrorMessage: job.ErrorMessage, + Artifact: &backupv1.BackupArtifact{ + LocalPath: job.ArtifactLocalPath, + SizeBytes: nillableInt64(job.ArtifactSizeBytes), + Sha256: job.ArtifactSha256, + }, + S3Object: 
&backupv1.BackupS3Object{ + Bucket: job.S3Bucket, + Key: job.S3Key, + Etag: job.S3Etag, + }, + } + if job.StartedAt != nil { + out.StartedAt = job.StartedAt.UTC().Format(time.RFC3339) + } + if job.FinishedAt != nil { + out.FinishedAt = job.FinishedAt.UTC().Format(time.RFC3339) + } + return out +} + +func nillableInt64(v *int64) int64 { + if v == nil { + return 0 + } + return *v +} diff --git a/backup/internal/s3client/client.go b/backup/internal/s3client/client.go new file mode 100644 index 000000000..7a54475ac --- /dev/null +++ b/backup/internal/s3client/client.go @@ -0,0 +1,142 @@ +package s3client + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/aws/aws-sdk-go-v2/aws" + awsconfig "github.com/aws/aws-sdk-go-v2/config" + "github.com/aws/aws-sdk-go-v2/credentials" + "github.com/aws/aws-sdk-go-v2/feature/s3/manager" + "github.com/aws/aws-sdk-go-v2/service/s3" +) + +type Config struct { + Endpoint string + Region string + AccessKeyID string + SecretAccessKey string + ForcePathStyle bool + UseSSL bool + Bucket string + Prefix string +} + +type Client struct { + raw *s3.Client + bucket string + prefix string +} + +func New(ctx context.Context, cfg Config) (*Client, error) { + region := strings.TrimSpace(cfg.Region) + if region == "" { + region = "us-east-1" + } + + loadOptions := []func(*awsconfig.LoadOptions) error{ + awsconfig.WithRegion(region), + } + if strings.TrimSpace(cfg.AccessKeyID) != "" || strings.TrimSpace(cfg.SecretAccessKey) != "" { + loadOptions = append(loadOptions, awsconfig.WithCredentialsProvider( + credentials.NewStaticCredentialsProvider( + strings.TrimSpace(cfg.AccessKeyID), + strings.TrimSpace(cfg.SecretAccessKey), + "", + ), + )) + } + + awsCfg, err := awsconfig.LoadDefaultConfig(ctx, loadOptions...) 
+ if err != nil { + return nil, err + } + + client := s3.NewFromConfig(awsCfg, func(options *s3.Options) { + options.UsePathStyle = cfg.ForcePathStyle + if endpoint := normalizeEndpoint(strings.TrimSpace(cfg.Endpoint), cfg.UseSSL); endpoint != "" { + options.EndpointResolver = s3.EndpointResolverFromURL(endpoint) + } + }) + + return &Client{ + raw: client, + bucket: strings.TrimSpace(cfg.Bucket), + prefix: strings.Trim(strings.TrimSpace(cfg.Prefix), "/"), + }, nil +} + +func (c *Client) Raw() *s3.Client { + if c == nil { + return nil + } + return c.raw +} + +func (c *Client) Bucket() string { + if c == nil { + return "" + } + return c.bucket +} + +func (c *Client) Prefix() string { + if c == nil { + return "" + } + return c.prefix +} + +func (c *Client) UploadFile(ctx context.Context, localPath, key string) (string, error) { + if c == nil || c.raw == nil { + return "", fmt.Errorf("s3 client is not initialized") + } + if strings.TrimSpace(c.bucket) == "" { + return "", fmt.Errorf("s3 bucket is required") + } + + path := strings.TrimSpace(localPath) + if path == "" { + return "", fmt.Errorf("local file path is required") + } + objectKey := strings.Trim(strings.TrimSpace(key), "/") + if objectKey == "" { + objectKey = filepath.Base(path) + } + + reader, err := os.Open(path) + if err != nil { + return "", err + } + defer func() { + _ = reader.Close() + }() + + uploader := manager.NewUploader(c.raw) + out, err := uploader.Upload(ctx, &s3.PutObjectInput{ + Bucket: aws.String(c.bucket), + Key: aws.String(objectKey), + Body: reader, + }) + if err != nil { + return "", err + } + return strings.TrimSpace(aws.ToString(out.ETag)), nil +} + +func normalizeEndpoint(endpoint string, useSSL bool) string { + trimmed := strings.TrimSpace(endpoint) + if trimmed == "" { + return "" + } + if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { + return trimmed + } + if useSSL { + return "https://" + trimmed + } + return "http://" + trimmed +} diff --git 
a/backup/internal/s3client/doc.go b/backup/internal/s3client/doc.go new file mode 100644 index 000000000..b5a3ff448 --- /dev/null +++ b/backup/internal/s3client/doc.go @@ -0,0 +1 @@ +package s3client diff --git a/backup/internal/store/entstore/doc.go b/backup/internal/store/entstore/doc.go new file mode 100644 index 000000000..b6fa08a51 --- /dev/null +++ b/backup/internal/store/entstore/doc.go @@ -0,0 +1 @@ +package entstore diff --git a/backup/internal/store/entstore/store.go b/backup/internal/store/entstore/store.go new file mode 100644 index 000000000..219a0e239 --- /dev/null +++ b/backup/internal/store/entstore/store.go @@ -0,0 +1,729 @@ +package entstore + +import ( + "context" + "database/sql" + "encoding/base64" + "errors" + "fmt" + "strconv" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/backup/ent" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" + "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" + "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" + + "entgo.io/ent/dialect" + entsql "entgo.io/ent/dialect/sql" + _ "modernc.org/sqlite" +) + +const ( + defaultSQLitePath = "/tmp/sub2api-backupd.db" + idempotencyWindow = 10 * time.Minute +) + +type SourceConfig struct { + Host string + Port int32 + User string + Password string + Database string + SSLMode string + Addr string + Username string + DB int32 + ContainerName string +} + +type S3Config struct { + Enabled bool + Endpoint string + Region string + Bucket string + AccessKeyID string + SecretAccessKey string + Prefix string + ForcePathStyle bool + UseSSL bool +} + +type ConfigSnapshot struct { + SourceMode string + BackupRoot string + SQLitePath string + RetentionDays int32 + KeepLast int32 + Postgres SourceConfig + Redis SourceConfig + S3 S3Config +} + +type CreateBackupJobInput struct { + BackupType string + UploadToS3 bool + TriggeredBy string + IdempotencyKey 
string +} + +type ListBackupJobsInput struct { + PageSize int32 + PageToken string + Status string + BackupType string +} + +type ListBackupJobsOutput struct { + Items []*ent.BackupJob + NextPageToken string +} + +type BackupArtifactSnapshot struct { + LocalPath string + SizeBytes int64 + SHA256 string +} + +type BackupS3ObjectSnapshot struct { + Bucket string + Key string + ETag string +} + +type FinishBackupJobInput struct { + JobID string + Status string + ErrorMessage string + Artifact *BackupArtifactSnapshot + S3Object *BackupS3ObjectSnapshot +} + +type Store struct { + client *ent.Client + sqlitePath string +} + +func Open(ctx context.Context, sqlitePath string) (*Store, error) { + path := normalizeSQLitePath(sqlitePath) + dsn := sqliteDSN(path) + + db, err := sql.Open("sqlite", dsn) + if err != nil { + return nil, err + } + + if _, err := db.ExecContext(ctx, "PRAGMA journal_mode=WAL;"); err != nil { + _ = db.Close() + return nil, err + } + if _, err := db.ExecContext(ctx, "PRAGMA busy_timeout=5000;"); err != nil { + _ = db.Close() + return nil, err + } + + drv := entsql.OpenDB(dialect.SQLite, db) + client := ent.NewClient(ent.Driver(drv)) + if err := client.Schema.Create(ctx); err != nil { + _ = client.Close() + return nil, err + } + + store := &Store{client: client, sqlitePath: path} + if err := store.ensureDefaults(ctx); err != nil { + _ = client.Close() + return nil, err + } + return store, nil +} + +func (s *Store) Close() error { + if s == nil || s.client == nil { + return nil + } + return s.client.Close() +} + +func (s *Store) GetConfig(ctx context.Context) (*ConfigSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + setting, err := s.client.BackupSetting.Query().Order(ent.Asc(backupsetting.FieldID)).First(ctx) + if err != nil { + return nil, err + } + postgresCfg, err := s.getSourceConfig(ctx, backupsourceconfig.SourceTypePostgres) + if err != nil { + return nil, err + } + redisCfg, err := s.getSourceConfig(ctx, 
backupsourceconfig.SourceTypeRedis) + if err != nil { + return nil, err + } + s3Cfg, err := s.client.BackupS3Config.Query().Order(ent.Asc(backups3config.FieldID)).First(ctx) + if err != nil { + return nil, err + } + + cfg := &ConfigSnapshot{ + SourceMode: setting.SourceMode.String(), + BackupRoot: setting.BackupRoot, + SQLitePath: setting.SqlitePath, + RetentionDays: int32(setting.RetentionDays), + KeepLast: int32(setting.KeepLast), + Postgres: SourceConfig{ + Host: postgresCfg.Host, + Port: int32(nillableInt(postgresCfg.Port)), + User: postgresCfg.Username, + Password: postgresCfg.PasswordEncrypted, + Database: postgresCfg.Database, + SSLMode: postgresCfg.SslMode, + ContainerName: postgresCfg.ContainerName, + }, + Redis: SourceConfig{ + Addr: redisCfg.Addr, + Username: redisCfg.Username, + Password: redisCfg.PasswordEncrypted, + DB: int32(nillableInt(redisCfg.RedisDb)), + ContainerName: redisCfg.ContainerName, + }, + S3: S3Config{ + Enabled: s3Cfg.Enabled, + Endpoint: s3Cfg.Endpoint, + Region: s3Cfg.Region, + Bucket: s3Cfg.Bucket, + AccessKeyID: s3Cfg.AccessKeyID, + SecretAccessKey: s3Cfg.SecretAccessKeyEncrypted, + Prefix: s3Cfg.Prefix, + ForcePathStyle: s3Cfg.ForcePathStyle, + UseSSL: s3Cfg.UseSsl, + }, + } + return cfg, nil +} + +func (s *Store) UpdateConfig(ctx context.Context, cfg ConfigSnapshot) (*ConfigSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + tx, err := s.client.Tx(ctx) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } + }() + + setting, err := tx.BackupSetting.Query().Order(ent.Asc(backupsetting.FieldID)).First(ctx) + if err != nil { + return nil, err + } + updatedSetting := tx.BackupSetting.UpdateOneID(setting.ID). + SetSourceMode(backupsetting.SourceMode(cfg.SourceMode)). + SetBackupRoot(strings.TrimSpace(cfg.BackupRoot)). + SetRetentionDays(int(cfg.RetentionDays)). + SetKeepLast(int(cfg.KeepLast)). 
+ SetSqlitePath(strings.TrimSpace(cfg.SQLitePath)) + if _, err = updatedSetting.Save(ctx); err != nil { + return nil, err + } + + if err = s.upsertSourceConfigTx(ctx, tx, backupsourceconfig.SourceTypePostgres, cfg.Postgres); err != nil { + return nil, err + } + if err = s.upsertSourceConfigTx(ctx, tx, backupsourceconfig.SourceTypeRedis, cfg.Redis); err != nil { + return nil, err + } + + s3Entity, err := tx.BackupS3Config.Query().Order(ent.Asc(backups3config.FieldID)).First(ctx) + if err != nil { + return nil, err + } + s3Updater := tx.BackupS3Config.UpdateOneID(s3Entity.ID). + SetEnabled(cfg.S3.Enabled). + SetEndpoint(strings.TrimSpace(cfg.S3.Endpoint)). + SetRegion(strings.TrimSpace(cfg.S3.Region)). + SetBucket(strings.TrimSpace(cfg.S3.Bucket)). + SetAccessKeyID(strings.TrimSpace(cfg.S3.AccessKeyID)). + SetPrefix(strings.Trim(strings.TrimSpace(cfg.S3.Prefix), "/")). + SetForcePathStyle(cfg.S3.ForcePathStyle). + SetUseSsl(cfg.S3.UseSSL) + if strings.TrimSpace(cfg.S3.SecretAccessKey) != "" { + s3Updater.SetSecretAccessKeyEncrypted(strings.TrimSpace(cfg.S3.SecretAccessKey)) + } + if _, err = s3Updater.Save(ctx); err != nil { + return nil, err + } + + if err = tx.Commit(); err != nil { + return nil, err + } + return s.GetConfig(ctx) +} + +func (s *Store) CreateBackupJob(ctx context.Context, input CreateBackupJobInput) (*ent.BackupJob, bool, error) { + if strings.TrimSpace(input.TriggeredBy) == "" { + input.TriggeredBy = "admin:unknown" + } + now := time.Now() + + if strings.TrimSpace(input.IdempotencyKey) != "" { + existing, err := s.client.BackupJob.Query(). + Where( + backupjob.BackupTypeEQ(backupjob.BackupType(input.BackupType)), + backupjob.TriggeredByEQ(input.TriggeredBy), + backupjob.IdempotencyKeyEQ(strings.TrimSpace(input.IdempotencyKey)), + backupjob.CreatedAtGTE(now.Add(-idempotencyWindow)), + ). + Order(ent.Desc(backupjob.FieldCreatedAt), ent.Desc(backupjob.FieldID)). 
+ First(ctx) + if err == nil { + return existing, false, nil + } + if !ent.IsNotFound(err) { + return nil, false, err + } + } + + jobID := generateJobID(now) + builder := s.client.BackupJob.Create(). + SetJobID(jobID). + SetBackupType(backupjob.BackupType(input.BackupType)). + SetStatus(backupjob.StatusQueued). + SetTriggeredBy(strings.TrimSpace(input.TriggeredBy)). + SetUploadToS3(input.UploadToS3) + if strings.TrimSpace(input.IdempotencyKey) != "" { + builder.SetIdempotencyKey(strings.TrimSpace(input.IdempotencyKey)) + } + + job, err := builder.Save(ctx) + if err != nil { + return nil, false, err + } + + _, _ = s.client.BackupJobEvent.Create(). + SetBackupJobID(job.ID). + SetEventType("state_change"). + SetMessage("job queued"). + Save(ctx) + + return job, true, nil +} + +func (s *Store) AcquireNextQueuedJob(ctx context.Context) (*ent.BackupJob, error) { + job, err := s.client.BackupJob.Query(). + Where(backupjob.StatusEQ(backupjob.StatusQueued)). + Order(ent.Asc(backupjob.FieldCreatedAt), ent.Asc(backupjob.FieldID)). + First(ctx) + if err != nil { + return nil, err + } + + now := time.Now() + updated, err := s.client.BackupJob.UpdateOneID(job.ID). + SetStatus(backupjob.StatusRunning). + SetStartedAt(now). + ClearFinishedAt(). + ClearErrorMessage(). + Save(ctx) + if err != nil { + return nil, err + } + + if err := s.appendJobEventByEntityID(ctx, updated.ID, backupjobevent.LevelInfo, "state_change", "job started", ""); err != nil { + return nil, err + } + return updated, nil +} + +func (s *Store) FinishBackupJob(ctx context.Context, input FinishBackupJobInput) (*ent.BackupJob, error) { + jobID := strings.TrimSpace(input.JobID) + if jobID == "" { + return nil, errors.New("job_id is required") + } + status, err := parseBackupStatus(strings.TrimSpace(input.Status)) + if err != nil { + return nil, err + } + + job, err := s.GetBackupJob(ctx, jobID) + if err != nil { + return nil, err + } + + updater := s.client.BackupJob.UpdateOneID(job.ID). + SetStatus(status). 
+ SetFinishedAt(time.Now()) + if strings.TrimSpace(input.ErrorMessage) != "" { + updater.SetErrorMessage(strings.TrimSpace(input.ErrorMessage)) + } else { + updater.ClearErrorMessage() + } + if input.Artifact != nil { + updater.SetArtifactLocalPath(strings.TrimSpace(input.Artifact.LocalPath)) + updater.SetArtifactSha256(strings.TrimSpace(input.Artifact.SHA256)) + updater.SetNillableArtifactSizeBytes(&input.Artifact.SizeBytes) + } + if input.S3Object != nil { + updater.SetS3Bucket(strings.TrimSpace(input.S3Object.Bucket)) + updater.SetS3Key(strings.TrimSpace(input.S3Object.Key)) + updater.SetS3Etag(strings.TrimSpace(input.S3Object.ETag)) + } + updated, err := updater.Save(ctx) + if err != nil { + return nil, err + } + + eventLevel := backupjobevent.LevelInfo + if status == backupjob.StatusFailed { + eventLevel = backupjobevent.LevelError + } else if status == backupjob.StatusPartialSucceeded { + eventLevel = backupjobevent.LevelWarning + } + message := fmt.Sprintf("job finished: %s", status.String()) + if strings.TrimSpace(input.ErrorMessage) != "" { + message = message + ": " + strings.TrimSpace(input.ErrorMessage) + } + if err := s.appendJobEventByEntityID(ctx, updated.ID, eventLevel, "state_change", message, ""); err != nil { + return nil, err + } + return updated, nil +} + +func (s *Store) AppendJobEvent(ctx context.Context, jobID, level, eventType, message, payload string) error { + job, err := s.GetBackupJob(ctx, jobID) + if err != nil { + return err + } + lv, err := parseEventLevel(level) + if err != nil { + return err + } + return s.appendJobEventByEntityID( + ctx, + job.ID, + lv, + strings.TrimSpace(eventType), + strings.TrimSpace(message), + strings.TrimSpace(payload), + ) +} + +func (s *Store) RequeueRunningJobs(ctx context.Context) (int, error) { + jobs, err := s.client.BackupJob.Query(). + Where(backupjob.StatusEQ(backupjob.StatusRunning)). 
+ All(ctx) + if err != nil { + return 0, err + } + if len(jobs) == 0 { + return 0, nil + } + + ids := make([]int, 0, len(jobs)) + for _, item := range jobs { + ids = append(ids, item.ID) + } + affected, err := s.client.BackupJob.Update(). + Where(backupjob.IDIn(ids...)). + SetStatus(backupjob.StatusQueued). + ClearFinishedAt(). + SetErrorMessage("job requeued after backupd restart"). + Save(ctx) + if err != nil { + return 0, err + } + + for _, item := range jobs { + _ = s.appendJobEventByEntityID( + ctx, + item.ID, + backupjobevent.LevelWarning, + "state_change", + "job requeued after backupd restart", + "", + ) + } + return affected, nil +} + +func (s *Store) ListFinishedJobsForRetention(ctx context.Context) ([]*ent.BackupJob, error) { + return s.client.BackupJob.Query(). + Where( + backupjob.Or( + backupjob.StatusEQ(backupjob.StatusSucceeded), + backupjob.StatusEQ(backupjob.StatusPartialSucceeded), + ), + backupjob.ArtifactLocalPathNEQ(""), + ). + Order(ent.Desc(backupjob.FieldFinishedAt), ent.Desc(backupjob.FieldID)). + All(ctx) +} + +func (s *Store) ListBackupJobs(ctx context.Context, input ListBackupJobsInput) (*ListBackupJobsOutput, error) { + pageSize := int(input.PageSize) + if pageSize <= 0 { + pageSize = 20 + } + if pageSize > 200 { + pageSize = 200 + } + offset, err := decodePageToken(input.PageToken) + if err != nil { + return nil, err + } + + query := s.client.BackupJob.Query() + if strings.TrimSpace(input.Status) != "" { + query = query.Where(backupjob.StatusEQ(backupjob.Status(strings.TrimSpace(input.Status)))) + } + if strings.TrimSpace(input.BackupType) != "" { + query = query.Where(backupjob.BackupTypeEQ(backupjob.BackupType(strings.TrimSpace(input.BackupType)))) + } + + items, err := query. + Order(ent.Desc(backupjob.FieldCreatedAt), ent.Desc(backupjob.FieldID)). + Offset(offset). + Limit(pageSize). 
+ All(ctx) + if err != nil { + return nil, err + } + + nextToken := "" + if len(items) == pageSize { + nextToken = encodePageToken(offset + len(items)) + } + return &ListBackupJobsOutput{Items: items, NextPageToken: nextToken}, nil +} + +func (s *Store) GetBackupJob(ctx context.Context, jobID string) (*ent.BackupJob, error) { + return s.client.BackupJob.Query().Where(backupjob.JobIDEQ(strings.TrimSpace(jobID))).First(ctx) +} + +func (s *Store) getSourceConfig(ctx context.Context, sourceType backupsourceconfig.SourceType) (*ent.BackupSourceConfig, error) { + return s.client.BackupSourceConfig.Query().Where(backupsourceconfig.SourceTypeEQ(sourceType)).First(ctx) +} + +func (s *Store) appendJobEventByEntityID(ctx context.Context, backupJobID int, level backupjobevent.Level, eventType, message, payload string) error { + eventBuilder := s.client.BackupJobEvent.Create(). + SetBackupJobID(backupJobID). + SetLevel(level). + SetEventType(defaultIfBlank(eventType, "state_change")). + SetMessage(defaultIfBlank(message, "event")) + if strings.TrimSpace(payload) != "" { + eventBuilder.SetPayload(strings.TrimSpace(payload)) + } + _, err := eventBuilder.Save(ctx) + return err +} + +func (s *Store) upsertSourceConfigTx(ctx context.Context, tx *ent.Tx, sourceType backupsourceconfig.SourceType, cfg SourceConfig) error { + entity, err := tx.BackupSourceConfig.Query().Where(backupsourceconfig.SourceTypeEQ(sourceType)).First(ctx) + if err != nil { + return err + } + + updater := tx.BackupSourceConfig.UpdateOneID(entity.ID). + SetHost(strings.TrimSpace(cfg.Host)). + SetPort(int(cfg.Port)). + SetUsername(strings.TrimSpace(cfg.User)). + SetDatabase(strings.TrimSpace(cfg.Database)). + SetSslMode(strings.TrimSpace(cfg.SSLMode)). + SetAddr(strings.TrimSpace(cfg.Addr)). + SetRedisDb(int(cfg.DB)). 
+ SetContainerName(strings.TrimSpace(cfg.ContainerName)) + if strings.TrimSpace(cfg.Username) != "" { + updater.SetUsername(strings.TrimSpace(cfg.Username)) + } + if strings.TrimSpace(cfg.Password) != "" { + updater.SetPasswordEncrypted(strings.TrimSpace(cfg.Password)) + } + _, err = updater.Save(ctx) + return err +} + +func (s *Store) ensureDefaults(ctx context.Context) error { + if _, err := s.client.BackupSetting.Query().First(ctx); err != nil { + if !ent.IsNotFound(err) { + return err + } + if _, err := s.client.BackupSetting.Create(). + SetSourceMode(backupsetting.SourceModeDirect). + SetBackupRoot("/var/lib/sub2api/backups"). + SetRetentionDays(7). + SetKeepLast(30). + SetSqlitePath(s.sqlitePath). + Save(ctx); err != nil { + return err + } + } + + if _, err := s.getSourceConfig(ctx, backupsourceconfig.SourceTypePostgres); err != nil { + if !ent.IsNotFound(err) { + return err + } + if _, err := s.client.BackupSourceConfig.Create(). + SetSourceType(backupsourceconfig.SourceTypePostgres). + SetHost("127.0.0.1"). + SetPort(5432). + SetUsername("postgres"). + SetDatabase("sub2api"). + SetSslMode("disable"). + SetContainerName(""). + Save(ctx); err != nil { + return err + } + } + + if _, err := s.getSourceConfig(ctx, backupsourceconfig.SourceTypeRedis); err != nil { + if !ent.IsNotFound(err) { + return err + } + if _, err := s.client.BackupSourceConfig.Create(). + SetSourceType(backupsourceconfig.SourceTypeRedis). + SetAddr("127.0.0.1:6379"). + SetRedisDb(0). + SetContainerName(""). + Save(ctx); err != nil { + return err + } + } + + if _, err := s.client.BackupS3Config.Query().First(ctx); err != nil { + if !ent.IsNotFound(err) { + return err + } + if _, err := s.client.BackupS3Config.Create(). + SetEnabled(false). + SetEndpoint(""). + SetRegion(""). + SetBucket(""). + SetAccessKeyID(""). + SetPrefix(""). + SetForcePathStyle(false). + SetUseSsl(true). 
+ Save(ctx); err != nil { + return err + } + } + return nil +} + +func normalizeSQLitePath(sqlitePath string) string { + path := strings.TrimSpace(sqlitePath) + if path == "" { + return defaultSQLitePath + } + return path +} + +func sqliteDSN(path string) string { + dsn := path + if !strings.HasPrefix(path, "file:") { + dsn = "file:" + path + } + + params := make([]string, 0, 2) + if !strings.Contains(dsn, "_fk=1") { + params = append(params, "_fk=1") + } + if !strings.Contains(dsn, "_pragma=foreign_keys(1)") && !strings.Contains(dsn, "_pragma=foreign_keys%281%29") { + params = append(params, "_pragma=foreign_keys(1)") + } + if len(params) == 0 { + return dsn + } + separator := "?" + if strings.Contains(dsn, "?") { + separator = "&" + } + return dsn + separator + strings.Join(params, "&") +} + +func nillableInt(v *int) int { + if v == nil { + return 0 + } + return *v +} + +func generateJobID(now time.Time) string { + timestamp := now.UTC().Format("20060102_150405") + suffix := strconv.FormatInt(now.UnixNano()%0xffffff, 16) + if len(suffix) < 6 { + suffix = strings.Repeat("0", 6-len(suffix)) + suffix + } + return fmt.Sprintf("bk_%s_%s", timestamp, suffix) +} + +func decodePageToken(token string) (int, error) { + t := strings.TrimSpace(token) + if t == "" { + return 0, nil + } + raw, err := base64.StdEncoding.DecodeString(t) + if err != nil { + return 0, err + } + offset, err := strconv.Atoi(string(raw)) + if err != nil { + return 0, err + } + if offset < 0 { + return 0, errors.New("negative page token") + } + return offset, nil +} + +func encodePageToken(offset int) string { + if offset <= 0 { + return "" + } + return base64.StdEncoding.EncodeToString([]byte(strconv.Itoa(offset))) +} + +func parseBackupStatus(v string) (backupjob.Status, error) { + switch strings.TrimSpace(v) { + case backupjob.StatusQueued.String(): + return backupjob.StatusQueued, nil + case backupjob.StatusRunning.String(): + return backupjob.StatusRunning, nil + case 
backupjob.StatusSucceeded.String(): + return backupjob.StatusSucceeded, nil + case backupjob.StatusFailed.String(): + return backupjob.StatusFailed, nil + case backupjob.StatusPartialSucceeded.String(): + return backupjob.StatusPartialSucceeded, nil + default: + return "", fmt.Errorf("invalid backup status: %s", v) + } +} + +func parseEventLevel(v string) (backupjobevent.Level, error) { + switch strings.TrimSpace(v) { + case "", backupjobevent.LevelInfo.String(): + return backupjobevent.LevelInfo, nil + case backupjobevent.LevelWarning.String(): + return backupjobevent.LevelWarning, nil + case backupjobevent.LevelError.String(): + return backupjobevent.LevelError, nil + default: + return "", fmt.Errorf("invalid event level: %s", v) + } +} + +func defaultIfBlank(v, fallback string) string { + trimmed := strings.TrimSpace(v) + if trimmed == "" { + return fallback + } + return trimmed +} diff --git a/backup/internal/store/entstore/store_test.go b/backup/internal/store/entstore/store_test.go new file mode 100644 index 000000000..c5217ee58 --- /dev/null +++ b/backup/internal/store/entstore/store_test.go @@ -0,0 +1,153 @@ +package entstore + +import ( + "context" + "path/filepath" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" + "github.com/stretchr/testify/require" +) + +func TestStore_CreateAcquireFinishBackupJob(t *testing.T) { + store := openTestStore(t) + + job, created, err := store.CreateBackupJob(context.Background(), CreateBackupJobInput{ + BackupType: backupjob.BackupTypePostgres.String(), + UploadToS3: true, + TriggeredBy: "admin:1", + IdempotencyKey: "idem-1", + }) + require.NoError(t, err) + require.True(t, created) + require.Equal(t, backupjob.StatusQueued, job.Status) + + acquired, err := store.AcquireNextQueuedJob(context.Background()) + require.NoError(t, err) + require.Equal(t, job.JobID, acquired.JobID) + require.Equal(t, backupjob.StatusRunning, acquired.Status) + require.NotNil(t, acquired.StartedAt) + + size := 
int64(1024) + finished, err := store.FinishBackupJob(context.Background(), FinishBackupJobInput{ + JobID: acquired.JobID, + Status: backupjob.StatusSucceeded.String(), + Artifact: &BackupArtifactSnapshot{ + LocalPath: "/tmp/demo/bundle.tar.gz", + SizeBytes: size, + SHA256: "sha256-demo", + }, + S3Object: &BackupS3ObjectSnapshot{ + Bucket: "bucket-demo", + Key: "demo/key", + ETag: "etag-demo", + }, + }) + require.NoError(t, err) + require.Equal(t, backupjob.StatusSucceeded, finished.Status) + require.NotNil(t, finished.FinishedAt) + require.Equal(t, "/tmp/demo/bundle.tar.gz", finished.ArtifactLocalPath) + require.Equal(t, "sha256-demo", finished.ArtifactSha256) + require.NotNil(t, finished.ArtifactSizeBytes) + require.Equal(t, size, *finished.ArtifactSizeBytes) + require.Equal(t, "bucket-demo", finished.S3Bucket) + require.Equal(t, "demo/key", finished.S3Key) +} + +func TestStore_CreateBackupJob_Idempotency(t *testing.T) { + store := openTestStore(t) + + first, created, err := store.CreateBackupJob(context.Background(), CreateBackupJobInput{ + BackupType: backupjob.BackupTypeRedis.String(), + UploadToS3: false, + TriggeredBy: "admin:2", + IdempotencyKey: "idem-same", + }) + require.NoError(t, err) + require.True(t, created) + + second, created, err := store.CreateBackupJob(context.Background(), CreateBackupJobInput{ + BackupType: backupjob.BackupTypeRedis.String(), + UploadToS3: false, + TriggeredBy: "admin:2", + IdempotencyKey: "idem-same", + }) + require.NoError(t, err) + require.False(t, created) + require.Equal(t, first.JobID, second.JobID) +} + +func TestStore_RequeueRunningJobs(t *testing.T) { + store := openTestStore(t) + + _, _, err := store.CreateBackupJob(context.Background(), CreateBackupJobInput{ + BackupType: backupjob.BackupTypeFull.String(), + UploadToS3: false, + TriggeredBy: "admin:3", + }) + require.NoError(t, err) + + acquired, err := store.AcquireNextQueuedJob(context.Background()) + require.NoError(t, err) + require.Equal(t, 
backupjob.StatusRunning, acquired.Status) + + count, err := store.RequeueRunningJobs(context.Background()) + require.NoError(t, err) + require.Equal(t, 1, count) + + job, err := store.GetBackupJob(context.Background(), acquired.JobID) + require.NoError(t, err) + require.Equal(t, backupjob.StatusQueued, job.Status) + require.Equal(t, "job requeued after backupd restart", job.ErrorMessage) +} + +func TestStore_UpdateConfig_KeepSecretWhenEmpty(t *testing.T) { + store := openTestStore(t) + ctx := context.Background() + + cfg, err := store.GetConfig(ctx) + require.NoError(t, err) + cfg.SourceMode = "direct" + cfg.BackupRoot = filepath.Join(t.TempDir(), "backups") + cfg.SQLitePath = filepath.Join(t.TempDir(), "meta.db") + cfg.RetentionDays = 7 + cfg.KeepLast = 30 + cfg.Postgres.Password = "pg-secret" + cfg.Redis.Password = "redis-secret" + cfg.S3.SecretAccessKey = "s3-secret" + cfg.S3.Region = "us-east-1" + cfg.S3.Bucket = "demo-bucket" + cfg.S3.AccessKeyID = "demo-ak" + _, err = store.UpdateConfig(ctx, *cfg) + require.NoError(t, err) + + cfg2, err := store.GetConfig(ctx) + require.NoError(t, err) + cfg2.Postgres.Password = "" + cfg2.Redis.Password = "" + cfg2.S3.SecretAccessKey = "" + _, err = store.UpdateConfig(ctx, *cfg2) + require.NoError(t, err) + + finalCfg, err := store.GetConfig(ctx) + require.NoError(t, err) + require.Equal(t, "pg-secret", finalCfg.Postgres.Password) + require.Equal(t, "redis-secret", finalCfg.Redis.Password) + require.Equal(t, "s3-secret", finalCfg.S3.SecretAccessKey) +} + +func openTestStore(t *testing.T) *Store { + t.Helper() + + dbPath := filepath.Join(t.TempDir(), "backupd-test-"+time.Now().Format("150405.000")+".db") + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + store, err := Open(ctx, dbPath) + require.NoError(t, err) + t.Cleanup(func() { + _ = store.Close() + }) + return store +} diff --git a/backup/proto/backup/v1/backup.pb.go b/backup/proto/backup/v1/backup.pb.go new file mode 100644 
index 000000000..07a5da1d5 --- /dev/null +++ b/backup/proto/backup/v1/backup.pb.go @@ -0,0 +1,1479 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.10 +// protoc v6.32.1 +// source: proto/backup/v1/backup.proto + +package backupv1 + +import ( + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type HealthRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthRequest) Reset() { + *x = HealthRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthRequest) ProtoMessage() {} + +func (x *HealthRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthRequest.ProtoReflect.Descriptor instead. 
+func (*HealthRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{0} +} + +type HealthResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + UptimeSeconds int64 `protobuf:"varint,3,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *HealthResponse) Reset() { + *x = HealthResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *HealthResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HealthResponse) ProtoMessage() {} + +func (x *HealthResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead. 
+func (*HealthResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{1} +} + +func (x *HealthResponse) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *HealthResponse) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +func (x *HealthResponse) GetUptimeSeconds() int64 { + if x != nil { + return x.UptimeSeconds + } + return 0 +} + +type SourceConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` + Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` + User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` + Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` + Database string `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` + SslMode string `protobuf:"bytes,6,opt,name=ssl_mode,json=sslMode,proto3" json:"ssl_mode,omitempty"` + Addr string `protobuf:"bytes,7,opt,name=addr,proto3" json:"addr,omitempty"` + Username string `protobuf:"bytes,8,opt,name=username,proto3" json:"username,omitempty"` + Db int32 `protobuf:"varint,9,opt,name=db,proto3" json:"db,omitempty"` + ContainerName string `protobuf:"bytes,10,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceConfig) Reset() { + *x = SourceConfig{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceConfig) ProtoMessage() {} + +func (x *SourceConfig) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[2] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceConfig.ProtoReflect.Descriptor instead. +func (*SourceConfig) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{2} +} + +func (x *SourceConfig) GetHost() string { + if x != nil { + return x.Host + } + return "" +} + +func (x *SourceConfig) GetPort() int32 { + if x != nil { + return x.Port + } + return 0 +} + +func (x *SourceConfig) GetUser() string { + if x != nil { + return x.User + } + return "" +} + +func (x *SourceConfig) GetPassword() string { + if x != nil { + return x.Password + } + return "" +} + +func (x *SourceConfig) GetDatabase() string { + if x != nil { + return x.Database + } + return "" +} + +func (x *SourceConfig) GetSslMode() string { + if x != nil { + return x.SslMode + } + return "" +} + +func (x *SourceConfig) GetAddr() string { + if x != nil { + return x.Addr + } + return "" +} + +func (x *SourceConfig) GetUsername() string { + if x != nil { + return x.Username + } + return "" +} + +func (x *SourceConfig) GetDb() int32 { + if x != nil { + return x.Db + } + return 0 +} + +func (x *SourceConfig) GetContainerName() string { + if x != nil { + return x.ContainerName + } + return "" +} + +type S3Config struct { + state protoimpl.MessageState `protogen:"open.v1"` + Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` + Endpoint string `protobuf:"bytes,2,opt,name=endpoint,proto3" json:"endpoint,omitempty"` + Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"` + Bucket string `protobuf:"bytes,4,opt,name=bucket,proto3" json:"bucket,omitempty"` + AccessKeyId string `protobuf:"bytes,5,opt,name=access_key_id,json=accessKeyId,proto3" json:"access_key_id,omitempty"` + SecretAccessKey string `protobuf:"bytes,6,opt,name=secret_access_key,json=secretAccessKey,proto3" 
json:"secret_access_key,omitempty"` + Prefix string `protobuf:"bytes,7,opt,name=prefix,proto3" json:"prefix,omitempty"` + ForcePathStyle bool `protobuf:"varint,8,opt,name=force_path_style,json=forcePathStyle,proto3" json:"force_path_style,omitempty"` + UseSsl bool `protobuf:"varint,9,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *S3Config) Reset() { + *x = S3Config{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *S3Config) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S3Config) ProtoMessage() {} + +func (x *S3Config) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S3Config.ProtoReflect.Descriptor instead. 
+func (*S3Config) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{3} +} + +func (x *S3Config) GetEnabled() bool { + if x != nil { + return x.Enabled + } + return false +} + +func (x *S3Config) GetEndpoint() string { + if x != nil { + return x.Endpoint + } + return "" +} + +func (x *S3Config) GetRegion() string { + if x != nil { + return x.Region + } + return "" +} + +func (x *S3Config) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *S3Config) GetAccessKeyId() string { + if x != nil { + return x.AccessKeyId + } + return "" +} + +func (x *S3Config) GetSecretAccessKey() string { + if x != nil { + return x.SecretAccessKey + } + return "" +} + +func (x *S3Config) GetPrefix() string { + if x != nil { + return x.Prefix + } + return "" +} + +func (x *S3Config) GetForcePathStyle() bool { + if x != nil { + return x.ForcePathStyle + } + return false +} + +func (x *S3Config) GetUseSsl() bool { + if x != nil { + return x.UseSsl + } + return false +} + +type BackupConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceMode string `protobuf:"bytes,1,opt,name=source_mode,json=sourceMode,proto3" json:"source_mode,omitempty"` + BackupRoot string `protobuf:"bytes,2,opt,name=backup_root,json=backupRoot,proto3" json:"backup_root,omitempty"` + SqlitePath string `protobuf:"bytes,3,opt,name=sqlite_path,json=sqlitePath,proto3" json:"sqlite_path,omitempty"` + RetentionDays int32 `protobuf:"varint,4,opt,name=retention_days,json=retentionDays,proto3" json:"retention_days,omitempty"` + KeepLast int32 `protobuf:"varint,5,opt,name=keep_last,json=keepLast,proto3" json:"keep_last,omitempty"` + Postgres *SourceConfig `protobuf:"bytes,6,opt,name=postgres,proto3" json:"postgres,omitempty"` + Redis *SourceConfig `protobuf:"bytes,7,opt,name=redis,proto3" json:"redis,omitempty"` + S3 *S3Config `protobuf:"bytes,8,opt,name=s3,proto3" json:"s3,omitempty"` + unknownFields protoimpl.UnknownFields + 
sizeCache protoimpl.SizeCache +} + +func (x *BackupConfig) Reset() { + *x = BackupConfig{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupConfig) ProtoMessage() {} + +func (x *BackupConfig) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupConfig.ProtoReflect.Descriptor instead. +func (*BackupConfig) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{4} +} + +func (x *BackupConfig) GetSourceMode() string { + if x != nil { + return x.SourceMode + } + return "" +} + +func (x *BackupConfig) GetBackupRoot() string { + if x != nil { + return x.BackupRoot + } + return "" +} + +func (x *BackupConfig) GetSqlitePath() string { + if x != nil { + return x.SqlitePath + } + return "" +} + +func (x *BackupConfig) GetRetentionDays() int32 { + if x != nil { + return x.RetentionDays + } + return 0 +} + +func (x *BackupConfig) GetKeepLast() int32 { + if x != nil { + return x.KeepLast + } + return 0 +} + +func (x *BackupConfig) GetPostgres() *SourceConfig { + if x != nil { + return x.Postgres + } + return nil +} + +func (x *BackupConfig) GetRedis() *SourceConfig { + if x != nil { + return x.Redis + } + return nil +} + +func (x *BackupConfig) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +type GetConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConfigRequest) Reset() { + *x = GetConfigRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[5] + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConfigRequest) ProtoMessage() {} + +func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConfigRequest.ProtoReflect.Descriptor instead. +func (*GetConfigRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{5} +} + +type GetConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConfigResponse) Reset() { + *x = GetConfigResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConfigResponse) ProtoMessage() {} + +func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConfigResponse.ProtoReflect.Descriptor instead. 
+func (*GetConfigResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{6} +} + +func (x *GetConfigResponse) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpdateConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateConfigRequest) Reset() { + *x = UpdateConfigRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateConfigRequest) ProtoMessage() {} + +func (x *UpdateConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateConfigRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateConfigRequest) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpdateConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateConfigResponse) Reset() { + *x = UpdateConfigResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateConfigResponse) ProtoMessage() {} + +func (x *UpdateConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateConfigResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdateConfigResponse) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type ValidateS3Request struct { + state protoimpl.MessageState `protogen:"open.v1"` + S3 *S3Config `protobuf:"bytes,1,opt,name=s3,proto3" json:"s3,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateS3Request) Reset() { + *x = ValidateS3Request{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateS3Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateS3Request) ProtoMessage() {} + +func (x *ValidateS3Request) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateS3Request.ProtoReflect.Descriptor instead. 
+func (*ValidateS3Request) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{9} +} + +func (x *ValidateS3Request) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +type ValidateS3Response struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateS3Response) Reset() { + *x = ValidateS3Response{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateS3Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateS3Response) ProtoMessage() {} + +func (x *ValidateS3Response) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateS3Response.ProtoReflect.Descriptor instead. 
+func (*ValidateS3Response) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{10} +} + +func (x *ValidateS3Response) GetOk() bool { + if x != nil { + return x.Ok + } + return false +} + +func (x *ValidateS3Response) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type CreateBackupJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + BackupType string `protobuf:"bytes,1,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` + UploadToS3 bool `protobuf:"varint,2,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` + TriggeredBy string `protobuf:"bytes,3,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` + IdempotencyKey string `protobuf:"bytes,4,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateBackupJobRequest) Reset() { + *x = CreateBackupJobRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateBackupJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBackupJobRequest) ProtoMessage() {} + +func (x *CreateBackupJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBackupJobRequest.ProtoReflect.Descriptor instead. 
+func (*CreateBackupJobRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{11} +} + +func (x *CreateBackupJobRequest) GetBackupType() string { + if x != nil { + return x.BackupType + } + return "" +} + +func (x *CreateBackupJobRequest) GetUploadToS3() bool { + if x != nil { + return x.UploadToS3 + } + return false +} + +func (x *CreateBackupJobRequest) GetTriggeredBy() string { + if x != nil { + return x.TriggeredBy + } + return "" +} + +func (x *CreateBackupJobRequest) GetIdempotencyKey() string { + if x != nil { + return x.IdempotencyKey + } + return "" +} + +type BackupArtifact struct { + state protoimpl.MessageState `protogen:"open.v1"` + LocalPath string `protobuf:"bytes,1,opt,name=local_path,json=localPath,proto3" json:"local_path,omitempty"` + SizeBytes int64 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` + Sha256 string `protobuf:"bytes,3,opt,name=sha256,proto3" json:"sha256,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupArtifact) Reset() { + *x = BackupArtifact{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupArtifact) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupArtifact) ProtoMessage() {} + +func (x *BackupArtifact) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupArtifact.ProtoReflect.Descriptor instead. 
+func (*BackupArtifact) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{12} +} + +func (x *BackupArtifact) GetLocalPath() string { + if x != nil { + return x.LocalPath + } + return "" +} + +func (x *BackupArtifact) GetSizeBytes() int64 { + if x != nil { + return x.SizeBytes + } + return 0 +} + +func (x *BackupArtifact) GetSha256() string { + if x != nil { + return x.Sha256 + } + return "" +} + +type BackupS3Object struct { + state protoimpl.MessageState `protogen:"open.v1"` + Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` + Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Etag string `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupS3Object) Reset() { + *x = BackupS3Object{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupS3Object) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupS3Object) ProtoMessage() {} + +func (x *BackupS3Object) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupS3Object.ProtoReflect.Descriptor instead. 
+func (*BackupS3Object) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{13} +} + +func (x *BackupS3Object) GetBucket() string { + if x != nil { + return x.Bucket + } + return "" +} + +func (x *BackupS3Object) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *BackupS3Object) GetEtag() string { + if x != nil { + return x.Etag + } + return "" +} + +type BackupJob struct { + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + BackupType string `protobuf:"bytes,2,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + TriggeredBy string `protobuf:"bytes,4,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` + IdempotencyKey string `protobuf:"bytes,5,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` + UploadToS3 bool `protobuf:"varint,6,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` + StartedAt string `protobuf:"bytes,7,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + FinishedAt string `protobuf:"bytes,8,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"` + ErrorMessage string `protobuf:"bytes,9,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + Artifact *BackupArtifact `protobuf:"bytes,10,opt,name=artifact,proto3" json:"artifact,omitempty"` + S3Object *BackupS3Object `protobuf:"bytes,11,opt,name=s3_object,json=s3Object,proto3" json:"s3_object,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupJob) Reset() { + *x = BackupJob{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x 
*BackupJob) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupJob) ProtoMessage() {} + +func (x *BackupJob) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupJob.ProtoReflect.Descriptor instead. +func (*BackupJob) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{14} +} + +func (x *BackupJob) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +func (x *BackupJob) GetBackupType() string { + if x != nil { + return x.BackupType + } + return "" +} + +func (x *BackupJob) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *BackupJob) GetTriggeredBy() string { + if x != nil { + return x.TriggeredBy + } + return "" +} + +func (x *BackupJob) GetIdempotencyKey() string { + if x != nil { + return x.IdempotencyKey + } + return "" +} + +func (x *BackupJob) GetUploadToS3() bool { + if x != nil { + return x.UploadToS3 + } + return false +} + +func (x *BackupJob) GetStartedAt() string { + if x != nil { + return x.StartedAt + } + return "" +} + +func (x *BackupJob) GetFinishedAt() string { + if x != nil { + return x.FinishedAt + } + return "" +} + +func (x *BackupJob) GetErrorMessage() string { + if x != nil { + return x.ErrorMessage + } + return "" +} + +func (x *BackupJob) GetArtifact() *BackupArtifact { + if x != nil { + return x.Artifact + } + return nil +} + +func (x *BackupJob) GetS3Object() *BackupS3Object { + if x != nil { + return x.S3Object + } + return nil +} + +type CreateBackupJobResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *BackupJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache 
protoimpl.SizeCache +} + +func (x *CreateBackupJobResponse) Reset() { + *x = CreateBackupJobResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateBackupJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateBackupJobResponse) ProtoMessage() {} + +func (x *CreateBackupJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateBackupJobResponse.ProtoReflect.Descriptor instead. +func (*CreateBackupJobResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{15} +} + +func (x *CreateBackupJobResponse) GetJob() *BackupJob { + if x != nil { + return x.Job + } + return nil +} + +type ListBackupJobsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` + PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + BackupType string `protobuf:"bytes,4,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListBackupJobsRequest) Reset() { + *x = ListBackupJobsRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListBackupJobsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBackupJobsRequest) ProtoMessage() {} + +func (x 
*ListBackupJobsRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListBackupJobsRequest.ProtoReflect.Descriptor instead. +func (*ListBackupJobsRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{16} +} + +func (x *ListBackupJobsRequest) GetPageSize() int32 { + if x != nil { + return x.PageSize + } + return 0 +} + +func (x *ListBackupJobsRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + +func (x *ListBackupJobsRequest) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +func (x *ListBackupJobsRequest) GetBackupType() string { + if x != nil { + return x.BackupType + } + return "" +} + +type ListBackupJobsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Items []*BackupJob `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListBackupJobsResponse) Reset() { + *x = ListBackupJobsResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListBackupJobsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListBackupJobsResponse) ProtoMessage() {} + +func (x *ListBackupJobsResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + 
return mi.MessageOf(x) +} + +// Deprecated: Use ListBackupJobsResponse.ProtoReflect.Descriptor instead. +func (*ListBackupJobsResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{17} +} + +func (x *ListBackupJobsResponse) GetItems() []*BackupJob { + if x != nil { + return x.Items + } + return nil +} + +func (x *ListBackupJobsResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + +type GetBackupJobRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetBackupJobRequest) Reset() { + *x = GetBackupJobRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetBackupJobRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBackupJobRequest) ProtoMessage() {} + +func (x *GetBackupJobRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBackupJobRequest.ProtoReflect.Descriptor instead. 
+func (*GetBackupJobRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{18} +} + +func (x *GetBackupJobRequest) GetJobId() string { + if x != nil { + return x.JobId + } + return "" +} + +type GetBackupJobResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Job *BackupJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetBackupJobResponse) Reset() { + *x = GetBackupJobResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetBackupJobResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetBackupJobResponse) ProtoMessage() {} + +func (x *GetBackupJobResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetBackupJobResponse.ProtoReflect.Descriptor instead. 
+func (*GetBackupJobResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{19} +} + +func (x *GetBackupJobResponse) GetJob() *BackupJob { + if x != nil { + return x.Job + } + return nil +} + +var File_proto_backup_v1_backup_proto protoreflect.FileDescriptor + +const file_proto_backup_v1_backup_proto_rawDesc = "" + + "\n" + + "\x1cproto/backup/v1/backup.proto\x12\tbackup.v1\"\x0f\n" + + "\rHealthRequest\"i\n" + + "\x0eHealthResponse\x12\x16\n" + + "\x06status\x18\x01 \x01(\tR\x06status\x12\x18\n" + + "\aversion\x18\x02 \x01(\tR\aversion\x12%\n" + + "\x0euptime_seconds\x18\x03 \x01(\x03R\ruptimeSeconds\"\x84\x02\n" + + "\fSourceConfig\x12\x12\n" + + "\x04host\x18\x01 \x01(\tR\x04host\x12\x12\n" + + "\x04port\x18\x02 \x01(\x05R\x04port\x12\x12\n" + + "\x04user\x18\x03 \x01(\tR\x04user\x12\x1a\n" + + "\bpassword\x18\x04 \x01(\tR\bpassword\x12\x1a\n" + + "\bdatabase\x18\x05 \x01(\tR\bdatabase\x12\x19\n" + + "\bssl_mode\x18\x06 \x01(\tR\asslMode\x12\x12\n" + + "\x04addr\x18\a \x01(\tR\x04addr\x12\x1a\n" + + "\busername\x18\b \x01(\tR\busername\x12\x0e\n" + + "\x02db\x18\t \x01(\x05R\x02db\x12%\n" + + "\x0econtainer_name\x18\n" + + " \x01(\tR\rcontainerName\"\x9b\x02\n" + + "\bS3Config\x12\x18\n" + + "\aenabled\x18\x01 \x01(\bR\aenabled\x12\x1a\n" + + "\bendpoint\x18\x02 \x01(\tR\bendpoint\x12\x16\n" + + "\x06region\x18\x03 \x01(\tR\x06region\x12\x16\n" + + "\x06bucket\x18\x04 \x01(\tR\x06bucket\x12\"\n" + + "\raccess_key_id\x18\x05 \x01(\tR\vaccessKeyId\x12*\n" + + "\x11secret_access_key\x18\x06 \x01(\tR\x0fsecretAccessKey\x12\x16\n" + + "\x06prefix\x18\a \x01(\tR\x06prefix\x12(\n" + + "\x10force_path_style\x18\b \x01(\bR\x0eforcePathStyle\x12\x17\n" + + "\ause_ssl\x18\t \x01(\bR\x06useSsl\"\xbe\x02\n" + + "\fBackupConfig\x12\x1f\n" + + "\vsource_mode\x18\x01 \x01(\tR\n" + + "sourceMode\x12\x1f\n" + + "\vbackup_root\x18\x02 \x01(\tR\n" + + "backupRoot\x12\x1f\n" + + "\vsqlite_path\x18\x03 \x01(\tR\n" + + 
"sqlitePath\x12%\n" + + "\x0eretention_days\x18\x04 \x01(\x05R\rretentionDays\x12\x1b\n" + + "\tkeep_last\x18\x05 \x01(\x05R\bkeepLast\x123\n" + + "\bpostgres\x18\x06 \x01(\v2\x17.backup.v1.SourceConfigR\bpostgres\x12-\n" + + "\x05redis\x18\a \x01(\v2\x17.backup.v1.SourceConfigR\x05redis\x12#\n" + + "\x02s3\x18\b \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\"\x12\n" + + "\x10GetConfigRequest\"D\n" + + "\x11GetConfigResponse\x12/\n" + + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"F\n" + + "\x13UpdateConfigRequest\x12/\n" + + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"G\n" + + "\x14UpdateConfigResponse\x12/\n" + + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"8\n" + + "\x11ValidateS3Request\x12#\n" + + "\x02s3\x18\x01 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\">\n" + + "\x12ValidateS3Response\x12\x0e\n" + + "\x02ok\x18\x01 \x01(\bR\x02ok\x12\x18\n" + + "\amessage\x18\x02 \x01(\tR\amessage\"\xa7\x01\n" + + "\x16CreateBackupJobRequest\x12\x1f\n" + + "\vbackup_type\x18\x01 \x01(\tR\n" + + "backupType\x12 \n" + + "\fupload_to_s3\x18\x02 \x01(\bR\n" + + "uploadToS3\x12!\n" + + "\ftriggered_by\x18\x03 \x01(\tR\vtriggeredBy\x12'\n" + + "\x0fidempotency_key\x18\x04 \x01(\tR\x0eidempotencyKey\"f\n" + + "\x0eBackupArtifact\x12\x1d\n" + + "\n" + + "local_path\x18\x01 \x01(\tR\tlocalPath\x12\x1d\n" + + "\n" + + "size_bytes\x18\x02 \x01(\x03R\tsizeBytes\x12\x16\n" + + "\x06sha256\x18\x03 \x01(\tR\x06sha256\"N\n" + + "\x0eBackupS3Object\x12\x16\n" + + "\x06bucket\x18\x01 \x01(\tR\x06bucket\x12\x10\n" + + "\x03key\x18\x02 \x01(\tR\x03key\x12\x12\n" + + "\x04etag\x18\x03 \x01(\tR\x04etag\"\x9d\x03\n" + + "\tBackupJob\x12\x15\n" + + "\x06job_id\x18\x01 \x01(\tR\x05jobId\x12\x1f\n" + + "\vbackup_type\x18\x02 \x01(\tR\n" + + "backupType\x12\x16\n" + + "\x06status\x18\x03 \x01(\tR\x06status\x12!\n" + + "\ftriggered_by\x18\x04 \x01(\tR\vtriggeredBy\x12'\n" + + "\x0fidempotency_key\x18\x05 \x01(\tR\x0eidempotencyKey\x12 \n" + + 
"\fupload_to_s3\x18\x06 \x01(\bR\n" + + "uploadToS3\x12\x1d\n" + + "\n" + + "started_at\x18\a \x01(\tR\tstartedAt\x12\x1f\n" + + "\vfinished_at\x18\b \x01(\tR\n" + + "finishedAt\x12#\n" + + "\rerror_message\x18\t \x01(\tR\ferrorMessage\x125\n" + + "\bartifact\x18\n" + + " \x01(\v2\x19.backup.v1.BackupArtifactR\bartifact\x126\n" + + "\ts3_object\x18\v \x01(\v2\x19.backup.v1.BackupS3ObjectR\bs3Object\"A\n" + + "\x17CreateBackupJobResponse\x12&\n" + + "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job\"\x8c\x01\n" + + "\x15ListBackupJobsRequest\x12\x1b\n" + + "\tpage_size\x18\x01 \x01(\x05R\bpageSize\x12\x1d\n" + + "\n" + + "page_token\x18\x02 \x01(\tR\tpageToken\x12\x16\n" + + "\x06status\x18\x03 \x01(\tR\x06status\x12\x1f\n" + + "\vbackup_type\x18\x04 \x01(\tR\n" + + "backupType\"l\n" + + "\x16ListBackupJobsResponse\x12*\n" + + "\x05items\x18\x01 \x03(\v2\x14.backup.v1.BackupJobR\x05items\x12&\n" + + "\x0fnext_page_token\x18\x02 \x01(\tR\rnextPageToken\",\n" + + "\x13GetBackupJobRequest\x12\x15\n" + + "\x06job_id\x18\x01 \x01(\tR\x05jobId\">\n" + + "\x14GetBackupJobResponse\x12&\n" + + "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job2\xb4\x04\n" + + "\rBackupService\x12=\n" + + "\x06Health\x12\x18.backup.v1.HealthRequest\x1a\x19.backup.v1.HealthResponse\x12F\n" + + "\tGetConfig\x12\x1b.backup.v1.GetConfigRequest\x1a\x1c.backup.v1.GetConfigResponse\x12O\n" + + "\fUpdateConfig\x12\x1e.backup.v1.UpdateConfigRequest\x1a\x1f.backup.v1.UpdateConfigResponse\x12I\n" + + "\n" + + "ValidateS3\x12\x1c.backup.v1.ValidateS3Request\x1a\x1d.backup.v1.ValidateS3Response\x12X\n" + + "\x0fCreateBackupJob\x12!.backup.v1.CreateBackupJobRequest\x1a\".backup.v1.CreateBackupJobResponse\x12U\n" + + "\x0eListBackupJobs\x12 .backup.v1.ListBackupJobsRequest\x1a!.backup.v1.ListBackupJobsResponse\x12O\n" + + "\fGetBackupJob\x12\x1e.backup.v1.GetBackupJobRequest\x1a\x1f.backup.v1.GetBackupJobResponseB=Z;github.com/Wei-Shaw/sub2api/backup/proto/backup/v1;backupv1b\x06proto3" + 
+var ( + file_proto_backup_v1_backup_proto_rawDescOnce sync.Once + file_proto_backup_v1_backup_proto_rawDescData []byte +) + +func file_proto_backup_v1_backup_proto_rawDescGZIP() []byte { + file_proto_backup_v1_backup_proto_rawDescOnce.Do(func() { + file_proto_backup_v1_backup_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_backup_v1_backup_proto_rawDesc), len(file_proto_backup_v1_backup_proto_rawDesc))) + }) + return file_proto_backup_v1_backup_proto_rawDescData +} + +var file_proto_backup_v1_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_proto_backup_v1_backup_proto_goTypes = []any{ + (*HealthRequest)(nil), // 0: backup.v1.HealthRequest + (*HealthResponse)(nil), // 1: backup.v1.HealthResponse + (*SourceConfig)(nil), // 2: backup.v1.SourceConfig + (*S3Config)(nil), // 3: backup.v1.S3Config + (*BackupConfig)(nil), // 4: backup.v1.BackupConfig + (*GetConfigRequest)(nil), // 5: backup.v1.GetConfigRequest + (*GetConfigResponse)(nil), // 6: backup.v1.GetConfigResponse + (*UpdateConfigRequest)(nil), // 7: backup.v1.UpdateConfigRequest + (*UpdateConfigResponse)(nil), // 8: backup.v1.UpdateConfigResponse + (*ValidateS3Request)(nil), // 9: backup.v1.ValidateS3Request + (*ValidateS3Response)(nil), // 10: backup.v1.ValidateS3Response + (*CreateBackupJobRequest)(nil), // 11: backup.v1.CreateBackupJobRequest + (*BackupArtifact)(nil), // 12: backup.v1.BackupArtifact + (*BackupS3Object)(nil), // 13: backup.v1.BackupS3Object + (*BackupJob)(nil), // 14: backup.v1.BackupJob + (*CreateBackupJobResponse)(nil), // 15: backup.v1.CreateBackupJobResponse + (*ListBackupJobsRequest)(nil), // 16: backup.v1.ListBackupJobsRequest + (*ListBackupJobsResponse)(nil), // 17: backup.v1.ListBackupJobsResponse + (*GetBackupJobRequest)(nil), // 18: backup.v1.GetBackupJobRequest + (*GetBackupJobResponse)(nil), // 19: backup.v1.GetBackupJobResponse +} +var file_proto_backup_v1_backup_proto_depIdxs = []int32{ + 2, // 0: 
backup.v1.BackupConfig.postgres:type_name -> backup.v1.SourceConfig + 2, // 1: backup.v1.BackupConfig.redis:type_name -> backup.v1.SourceConfig + 3, // 2: backup.v1.BackupConfig.s3:type_name -> backup.v1.S3Config + 4, // 3: backup.v1.GetConfigResponse.config:type_name -> backup.v1.BackupConfig + 4, // 4: backup.v1.UpdateConfigRequest.config:type_name -> backup.v1.BackupConfig + 4, // 5: backup.v1.UpdateConfigResponse.config:type_name -> backup.v1.BackupConfig + 3, // 6: backup.v1.ValidateS3Request.s3:type_name -> backup.v1.S3Config + 12, // 7: backup.v1.BackupJob.artifact:type_name -> backup.v1.BackupArtifact + 13, // 8: backup.v1.BackupJob.s3_object:type_name -> backup.v1.BackupS3Object + 14, // 9: backup.v1.CreateBackupJobResponse.job:type_name -> backup.v1.BackupJob + 14, // 10: backup.v1.ListBackupJobsResponse.items:type_name -> backup.v1.BackupJob + 14, // 11: backup.v1.GetBackupJobResponse.job:type_name -> backup.v1.BackupJob + 0, // 12: backup.v1.BackupService.Health:input_type -> backup.v1.HealthRequest + 5, // 13: backup.v1.BackupService.GetConfig:input_type -> backup.v1.GetConfigRequest + 7, // 14: backup.v1.BackupService.UpdateConfig:input_type -> backup.v1.UpdateConfigRequest + 9, // 15: backup.v1.BackupService.ValidateS3:input_type -> backup.v1.ValidateS3Request + 11, // 16: backup.v1.BackupService.CreateBackupJob:input_type -> backup.v1.CreateBackupJobRequest + 16, // 17: backup.v1.BackupService.ListBackupJobs:input_type -> backup.v1.ListBackupJobsRequest + 18, // 18: backup.v1.BackupService.GetBackupJob:input_type -> backup.v1.GetBackupJobRequest + 1, // 19: backup.v1.BackupService.Health:output_type -> backup.v1.HealthResponse + 6, // 20: backup.v1.BackupService.GetConfig:output_type -> backup.v1.GetConfigResponse + 8, // 21: backup.v1.BackupService.UpdateConfig:output_type -> backup.v1.UpdateConfigResponse + 10, // 22: backup.v1.BackupService.ValidateS3:output_type -> backup.v1.ValidateS3Response + 15, // 23: 
backup.v1.BackupService.CreateBackupJob:output_type -> backup.v1.CreateBackupJobResponse + 17, // 24: backup.v1.BackupService.ListBackupJobs:output_type -> backup.v1.ListBackupJobsResponse + 19, // 25: backup.v1.BackupService.GetBackupJob:output_type -> backup.v1.GetBackupJobResponse + 19, // [19:26] is the sub-list for method output_type + 12, // [12:19] is the sub-list for method input_type + 12, // [12:12] is the sub-list for extension type_name + 12, // [12:12] is the sub-list for extension extendee + 0, // [0:12] is the sub-list for field type_name +} + +func init() { file_proto_backup_v1_backup_proto_init() } +func file_proto_backup_v1_backup_proto_init() { + if File_proto_backup_v1_backup_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_backup_v1_backup_proto_rawDesc), len(file_proto_backup_v1_backup_proto_rawDesc)), + NumEnums: 0, + NumMessages: 20, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_proto_backup_v1_backup_proto_goTypes, + DependencyIndexes: file_proto_backup_v1_backup_proto_depIdxs, + MessageInfos: file_proto_backup_v1_backup_proto_msgTypes, + }.Build() + File_proto_backup_v1_backup_proto = out.File + file_proto_backup_v1_backup_proto_goTypes = nil + file_proto_backup_v1_backup_proto_depIdxs = nil +} diff --git a/backup/proto/backup/v1/backup.proto b/backup/proto/backup/v1/backup.proto new file mode 100644 index 000000000..b1fe78627 --- /dev/null +++ b/backup/proto/backup/v1/backup.proto @@ -0,0 +1,139 @@ +syntax = "proto3"; + +package backup.v1; + +option go_package = "github.com/Wei-Shaw/sub2api/backup/proto/backup/v1;backupv1"; + +service BackupService { + rpc Health(HealthRequest) returns (HealthResponse); + rpc GetConfig(GetConfigRequest) returns (GetConfigResponse); + rpc UpdateConfig(UpdateConfigRequest) returns (UpdateConfigResponse); + rpc 
ValidateS3(ValidateS3Request) returns (ValidateS3Response); + rpc CreateBackupJob(CreateBackupJobRequest) returns (CreateBackupJobResponse); + rpc ListBackupJobs(ListBackupJobsRequest) returns (ListBackupJobsResponse); + rpc GetBackupJob(GetBackupJobRequest) returns (GetBackupJobResponse); +} + +message HealthRequest {} + +message HealthResponse { + string status = 1; + string version = 2; + int64 uptime_seconds = 3; +} + +message SourceConfig { + string host = 1; + int32 port = 2; + string user = 3; + string password = 4; + string database = 5; + string ssl_mode = 6; + string addr = 7; + string username = 8; + int32 db = 9; + string container_name = 10; +} + +message S3Config { + bool enabled = 1; + string endpoint = 2; + string region = 3; + string bucket = 4; + string access_key_id = 5; + string secret_access_key = 6; + string prefix = 7; + bool force_path_style = 8; + bool use_ssl = 9; +} + +message BackupConfig { + string source_mode = 1; + string backup_root = 2; + string sqlite_path = 3; + int32 retention_days = 4; + int32 keep_last = 5; + SourceConfig postgres = 6; + SourceConfig redis = 7; + S3Config s3 = 8; +} + +message GetConfigRequest {} + +message GetConfigResponse { + BackupConfig config = 1; +} + +message UpdateConfigRequest { + BackupConfig config = 1; +} + +message UpdateConfigResponse { + BackupConfig config = 1; +} + +message ValidateS3Request { + S3Config s3 = 1; +} + +message ValidateS3Response { + bool ok = 1; + string message = 2; +} + +message CreateBackupJobRequest { + string backup_type = 1; + bool upload_to_s3 = 2; + string triggered_by = 3; + string idempotency_key = 4; +} + +message BackupArtifact { + string local_path = 1; + int64 size_bytes = 2; + string sha256 = 3; +} + +message BackupS3Object { + string bucket = 1; + string key = 2; + string etag = 3; +} + +message BackupJob { + string job_id = 1; + string backup_type = 2; + string status = 3; + string triggered_by = 4; + string idempotency_key = 5; + bool upload_to_s3 = 6; + 
string started_at = 7; + string finished_at = 8; + string error_message = 9; + BackupArtifact artifact = 10; + BackupS3Object s3_object = 11; +} + +message CreateBackupJobResponse { + BackupJob job = 1; +} + +message ListBackupJobsRequest { + int32 page_size = 1; + string page_token = 2; + string status = 3; + string backup_type = 4; +} + +message ListBackupJobsResponse { + repeated BackupJob items = 1; + string next_page_token = 2; +} + +message GetBackupJobRequest { + string job_id = 1; +} + +message GetBackupJobResponse { + BackupJob job = 1; +} diff --git a/backup/proto/backup/v1/backup_grpc.pb.go b/backup/proto/backup/v1/backup_grpc.pb.go new file mode 100644 index 000000000..c9d7383fe --- /dev/null +++ b/backup/proto/backup/v1/backup_grpc.pb.go @@ -0,0 +1,349 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.6.0 +// - protoc v6.32.1 +// source: proto/backup/v1/backup.proto + +package backupv1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.64.0 or later. 
+const _ = grpc.SupportPackageIsVersion9 + +const ( + BackupService_Health_FullMethodName = "/backup.v1.BackupService/Health" + BackupService_GetConfig_FullMethodName = "/backup.v1.BackupService/GetConfig" + BackupService_UpdateConfig_FullMethodName = "/backup.v1.BackupService/UpdateConfig" + BackupService_ValidateS3_FullMethodName = "/backup.v1.BackupService/ValidateS3" + BackupService_CreateBackupJob_FullMethodName = "/backup.v1.BackupService/CreateBackupJob" + BackupService_ListBackupJobs_FullMethodName = "/backup.v1.BackupService/ListBackupJobs" + BackupService_GetBackupJob_FullMethodName = "/backup.v1.BackupService/GetBackupJob" +) + +// BackupServiceClient is the client API for BackupService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type BackupServiceClient interface { + Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) + GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) + UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) + ValidateS3(ctx context.Context, in *ValidateS3Request, opts ...grpc.CallOption) (*ValidateS3Response, error) + CreateBackupJob(ctx context.Context, in *CreateBackupJobRequest, opts ...grpc.CallOption) (*CreateBackupJobResponse, error) + ListBackupJobs(ctx context.Context, in *ListBackupJobsRequest, opts ...grpc.CallOption) (*ListBackupJobsResponse, error) + GetBackupJob(ctx context.Context, in *GetBackupJobRequest, opts ...grpc.CallOption) (*GetBackupJobResponse, error) +} + +type backupServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewBackupServiceClient(cc grpc.ClientConnInterface) BackupServiceClient { + return &backupServiceClient{cc} +} + +func (c *backupServiceClient) Health(ctx context.Context, in *HealthRequest, opts 
...grpc.CallOption) (*HealthResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(HealthResponse) + err := c.cc.Invoke(ctx, BackupService_Health_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetConfigResponse) + err := c.cc.Invoke(ctx, BackupService_GetConfig_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UpdateConfigResponse) + err := c.cc.Invoke(ctx, BackupService_UpdateConfig_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) ValidateS3(ctx context.Context, in *ValidateS3Request, opts ...grpc.CallOption) (*ValidateS3Response, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ValidateS3Response) + err := c.cc.Invoke(ctx, BackupService_ValidateS3_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) CreateBackupJob(ctx context.Context, in *CreateBackupJobRequest, opts ...grpc.CallOption) (*CreateBackupJobResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateBackupJobResponse) + err := c.cc.Invoke(ctx, BackupService_CreateBackupJob_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) ListBackupJobs(ctx context.Context, in *ListBackupJobsRequest, opts ...grpc.CallOption) (*ListBackupJobsResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListBackupJobsResponse) + err := c.cc.Invoke(ctx, BackupService_ListBackupJobs_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) GetBackupJob(ctx context.Context, in *GetBackupJobRequest, opts ...grpc.CallOption) (*GetBackupJobResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(GetBackupJobResponse) + err := c.cc.Invoke(ctx, BackupService_GetBackupJob_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +// BackupServiceServer is the server API for BackupService service. +// All implementations must embed UnimplementedBackupServiceServer +// for forward compatibility. +type BackupServiceServer interface { + Health(context.Context, *HealthRequest) (*HealthResponse, error) + GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) + UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) + ValidateS3(context.Context, *ValidateS3Request) (*ValidateS3Response, error) + CreateBackupJob(context.Context, *CreateBackupJobRequest) (*CreateBackupJobResponse, error) + ListBackupJobs(context.Context, *ListBackupJobsRequest) (*ListBackupJobsResponse, error) + GetBackupJob(context.Context, *GetBackupJobRequest) (*GetBackupJobResponse, error) + mustEmbedUnimplementedBackupServiceServer() +} + +// UnimplementedBackupServiceServer must be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedBackupServiceServer struct{} + +func (UnimplementedBackupServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) { + return nil, status.Error(codes.Unimplemented, "method Health not implemented") +} +func (UnimplementedBackupServiceServer) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetConfig not implemented") +} +func (UnimplementedBackupServiceServer) UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) { + return nil, status.Error(codes.Unimplemented, "method UpdateConfig not implemented") +} +func (UnimplementedBackupServiceServer) ValidateS3(context.Context, *ValidateS3Request) (*ValidateS3Response, error) { + return nil, status.Error(codes.Unimplemented, "method ValidateS3 not implemented") +} +func (UnimplementedBackupServiceServer) CreateBackupJob(context.Context, *CreateBackupJobRequest) (*CreateBackupJobResponse, error) { + return nil, status.Error(codes.Unimplemented, "method CreateBackupJob not implemented") +} +func (UnimplementedBackupServiceServer) ListBackupJobs(context.Context, *ListBackupJobsRequest) (*ListBackupJobsResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ListBackupJobs not implemented") +} +func (UnimplementedBackupServiceServer) GetBackupJob(context.Context, *GetBackupJobRequest) (*GetBackupJobResponse, error) { + return nil, status.Error(codes.Unimplemented, "method GetBackupJob not implemented") +} +func (UnimplementedBackupServiceServer) mustEmbedUnimplementedBackupServiceServer() {} +func (UnimplementedBackupServiceServer) testEmbeddedByValue() {} + +// UnsafeBackupServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to BackupServiceServer will +// result in compilation errors. 
+type UnsafeBackupServiceServer interface { + mustEmbedUnimplementedBackupServiceServer() +} + +func RegisterBackupServiceServer(s grpc.ServiceRegistrar, srv BackupServiceServer) { + // If the following call panics, it indicates UnimplementedBackupServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + s.RegisterService(&BackupService_ServiceDesc, srv) +} + +func _BackupService_Health_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HealthRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).Health(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_Health_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).Health(ctx, req.(*HealthRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).GetConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_GetConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).GetConfig(ctx, req.(*GetConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_UpdateConfig_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).UpdateConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_UpdateConfig_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).UpdateConfig(ctx, req.(*UpdateConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_ValidateS3_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ValidateS3Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).ValidateS3(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_ValidateS3_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).ValidateS3(ctx, req.(*ValidateS3Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_CreateBackupJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateBackupJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).CreateBackupJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_CreateBackupJob_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).CreateBackupJob(ctx, req.(*CreateBackupJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_ListBackupJobs_Handler(srv interface{}, 
ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListBackupJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).ListBackupJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_ListBackupJobs_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).ListBackupJobs(ctx, req.(*ListBackupJobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_GetBackupJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBackupJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).GetBackupJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_GetBackupJob_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).GetBackupJob(ctx, req.(*GetBackupJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +// BackupService_ServiceDesc is the grpc.ServiceDesc for BackupService service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var BackupService_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "backup.v1.BackupService", + HandlerType: (*BackupServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Health", + Handler: _BackupService_Health_Handler, + }, + { + MethodName: "GetConfig", + Handler: _BackupService_GetConfig_Handler, + }, + { + MethodName: "UpdateConfig", + Handler: _BackupService_UpdateConfig_Handler, + }, + { + MethodName: "ValidateS3", + Handler: _BackupService_ValidateS3_Handler, + }, + { + MethodName: "CreateBackupJob", + Handler: _BackupService_CreateBackupJob_Handler, + }, + { + MethodName: "ListBackupJobs", + Handler: _BackupService_ListBackupJobs_Handler, + }, + { + MethodName: "GetBackupJob", + Handler: _BackupService_GetBackupJob_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "proto/backup/v1/backup.proto", +} diff --git a/deploy/BACKUPD_CN.md b/deploy/BACKUPD_CN.md new file mode 100644 index 000000000..e023cc1a8 --- /dev/null +++ b/deploy/BACKUPD_CN.md @@ -0,0 +1,78 @@ +# backupd 部署说明(数据管理) + +本文说明如何在宿主机部署 `backupd`,并与主进程联动开启“数据管理”功能。 + +## 1. 关键约束 + +- 主进程固定探测路径:`/tmp/sub2api-backup.sock` +- 仅当该 Unix Socket 可连通且 `Health` 成功时,后台“数据管理”才会启用 +- `backupd` 使用 SQLite 持久化元数据,不依赖主库 + +## 2. 宿主机构建与运行 + +```bash +cd /opt/sub2api-src/backup +go build -o /opt/sub2api/backupd ./cmd/backupd + +mkdir -p /var/lib/sub2api/backup +chown -R sub2api:sub2api /var/lib/sub2api/backup +``` + +手动启动示例: + +```bash +/opt/sub2api/backupd \ + -socket-path /tmp/sub2api-backup.sock \ + -sqlite-path /var/lib/sub2api/backup/backupd.db \ + -version 1.0.0 +``` + +## 3. 
systemd 托管(推荐) + +仓库已提供示例服务文件:`deploy/sub2api-backupd.service` + +```bash +sudo cp deploy/sub2api-backupd.service /etc/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl enable --now sub2api-backupd +sudo systemctl status sub2api-backupd +``` + +查看日志: + +```bash +sudo journalctl -u sub2api-backupd -f +``` + +也可以使用一键安装脚本(自动安装二进制 + 注册 systemd): + +```bash +# 方式一:使用现成二进制 +sudo ./deploy/install-backupd.sh --binary /path/to/backupd + +# 方式二:从源码构建后安装 +sudo ./deploy/install-backupd.sh --source /path/to/sub2api +``` + +## 4. Docker 部署联动 + +若 `sub2api` 运行在 Docker 容器中,需要将宿主机 Socket 挂载到容器同路径: + +```yaml +services: + sub2api: + volumes: + - /tmp/sub2api-backup.sock:/tmp/sub2api-backup.sock +``` + +建议在 `docker-compose.override.yml` 中维护该挂载,避免覆盖主 compose 文件。 + +## 5. 依赖检查 + +`backupd` 执行备份时依赖以下工具: + +- `pg_dump` +- `redis-cli` +- `docker`(仅 `source_mode=docker_exec` 时) + +缺失依赖会导致对应任务失败,并在任务详情中体现错误信息。 diff --git a/deploy/README.md b/deploy/README.md index 3292e81a1..f5b53ec2f 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -19,7 +19,10 @@ This directory contains files for deploying Sub2API on Linux servers. 
| `.env.example` | Docker environment variables template | | `DOCKER.md` | Docker Hub documentation | | `install.sh` | One-click binary installation script | +| `install-backupd.sh` | backupd 一键安装脚本 | | `sub2api.service` | Systemd service unit file | +| `sub2api-backupd.service` | backupd systemd service unit file | +| `BACKUPD_CN.md` | backupd 部署与联动说明(中文) | | `config.example.yaml` | Example configuration file | --- @@ -145,6 +148,14 @@ SELECT (SELECT COUNT(*) FROM user_allowed_groups) AS new_pair_count; ``` +### backupd(数据管理)联动 + +如需启用管理后台“数据管理”功能,请额外部署宿主机 `backupd`: + +- 主进程固定探测 `/tmp/sub2api-backup.sock` +- Docker 场景下需把宿主机 Socket 挂载到容器内同路径 +- 详细步骤见:`deploy/BACKUPD_CN.md` + ### Commands For **local directory version** (docker-compose.local.yml): diff --git a/deploy/docker-compose.override.yml.example b/deploy/docker-compose.override.yml.example index 297724f5d..a7350471b 100644 --- a/deploy/docker-compose.override.yml.example +++ b/deploy/docker-compose.override.yml.example @@ -127,6 +127,19 @@ services: # - ./logs:/app/logs # - ./backups:/app/backups +# ============================================================================= +# Scenario 6: 启用宿主机 backupd(数据管理) +# ============================================================================= +# 说明: +# - backupd 运行在宿主机(systemd 或手动) +# - 主进程固定探测 /tmp/sub2api-backup.sock +# - 需要把宿主机 socket 挂载到容器内同路径 +# +# services: +# sub2api: +# volumes: +# - /tmp/sub2api-backup.sock:/tmp/sub2api-backup.sock + # ============================================================================= # Additional Notes # ============================================================================= diff --git a/deploy/install-backupd.sh b/deploy/install-backupd.sh new file mode 100755 index 000000000..340ee396f --- /dev/null +++ b/deploy/install-backupd.sh @@ -0,0 +1,123 @@ +#!/usr/bin/env bash + +set -euo pipefail + +# 用法: +# sudo ./install-backupd.sh --binary /path/to/backupd +# 或: +# sudo ./install-backupd.sh --source 
/path/to/sub2api/repo + +BIN_PATH="" +SOURCE_PATH="" +INSTALL_DIR="/opt/sub2api" +DATA_DIR="/var/lib/sub2api/backup" +SERVICE_FILE_NAME="sub2api-backupd.service" + +function print_help() { + cat <<'EOF' +用法: + install-backupd.sh [--binary ] [--source <仓库路径>] + +参数: + --binary 指定已构建的 backupd 二进制路径 + --source 指定 sub2api 仓库路径(脚本会执行 go build) + -h, --help 显示帮助 + +示例: + sudo ./install-backupd.sh --binary ./backup/backupd + sudo ./install-backupd.sh --source /opt/sub2api-src +EOF +} + +while [[ $# -gt 0 ]]; do + case "$1" in + --binary) + BIN_PATH="${2:-}" + shift 2 + ;; + --source) + SOURCE_PATH="${2:-}" + shift 2 + ;; + -h|--help) + print_help + exit 0 + ;; + *) + echo "未知参数: $1" + print_help + exit 1 + ;; + esac +done + +if [[ -n "$BIN_PATH" && -n "$SOURCE_PATH" ]]; then + echo "错误: --binary 与 --source 只能二选一" + exit 1 +fi + +if [[ -z "$BIN_PATH" && -z "$SOURCE_PATH" ]]; then + echo "错误: 必须提供 --binary 或 --source" + exit 1 +fi + +if [[ "$(id -u)" -ne 0 ]]; then + echo "错误: 请使用 root 权限执行(例如 sudo)" + exit 1 +fi + +if [[ -n "$SOURCE_PATH" ]]; then + if [[ ! -d "$SOURCE_PATH/backup" ]]; then + echo "错误: 无效仓库路径,未找到 $SOURCE_PATH/backup" + exit 1 + fi + echo "[1/6] 从源码构建 backupd..." + (cd "$SOURCE_PATH/backup" && go build -o backupd ./cmd/backupd) + BIN_PATH="$SOURCE_PATH/backup/backupd" +fi + +if [[ ! -f "$BIN_PATH" ]]; then + echo "错误: 二进制文件不存在: $BIN_PATH" + exit 1 +fi + +if ! id sub2api >/dev/null 2>&1; then + echo "[2/6] 创建系统用户 sub2api..." + useradd --system --no-create-home --shell /usr/sbin/nologin sub2api +else + echo "[2/6] 系统用户 sub2api 已存在,跳过创建" +fi + +echo "[3/6] 安装 backupd 二进制..." +mkdir -p "$INSTALL_DIR" +install -m 0755 "$BIN_PATH" "$INSTALL_DIR/backupd" + +echo "[4/6] 准备数据目录..." +mkdir -p "$DATA_DIR" +chown -R sub2api:sub2api /var/lib/sub2api +chmod 0750 "$DATA_DIR" + +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +SERVICE_TEMPLATE="$SCRIPT_DIR/$SERVICE_FILE_NAME" +if [[ ! 
-f "$SERVICE_TEMPLATE" ]]; then + echo "错误: 未找到服务模板 $SERVICE_TEMPLATE" + exit 1 +fi + +echo "[5/6] 安装 systemd 服务..." +cp "$SERVICE_TEMPLATE" "/etc/systemd/system/$SERVICE_FILE_NAME" +systemctl daemon-reload +systemctl enable --now sub2api-backupd + +echo "[6/6] 完成,当前状态:" +systemctl --no-pager --full status sub2api-backupd || true + +cat <<'EOF' + +下一步建议: +1. 查看日志:sudo journalctl -u sub2api-backupd -f +2. 在 sub2api(容器部署时)挂载 socket: + /tmp/sub2api-backup.sock:/tmp/sub2api-backup.sock +3. 进入管理后台“数据管理”页面确认 agent=enabled + +EOF diff --git a/deploy/sub2api-backupd.service b/deploy/sub2api-backupd.service new file mode 100644 index 000000000..52d91f2da --- /dev/null +++ b/deploy/sub2api-backupd.service @@ -0,0 +1,22 @@ +[Unit] +Description=Sub2API Backup Daemon +After=network.target +Wants=network.target + +[Service] +Type=simple +User=sub2api +Group=sub2api +WorkingDirectory=/opt/sub2api +ExecStart=/opt/sub2api/backupd \ + -socket-path /tmp/sub2api-backup.sock \ + -sqlite-path /var/lib/sub2api/backup/backupd.db \ + -version 1.0.0 +Restart=always +RestartSec=5s +LimitNOFILE=100000 +NoNewPrivileges=true +PrivateTmp=false + +[Install] +WantedBy=multi-user.target diff --git a/frontend/src/api/admin/dataManagement.ts b/frontend/src/api/admin/dataManagement.ts new file mode 100644 index 000000000..d58b0a11f --- /dev/null +++ b/frontend/src/api/admin/dataManagement.ts @@ -0,0 +1,181 @@ +import { apiClient } from '../client' + +export type BackupType = 'postgres' | 'redis' | 'full' +export type BackupJobStatus = 'queued' | 'running' | 'succeeded' | 'failed' | 'partial_succeeded' + +export interface BackupAgentInfo { + status: string + version: string + uptime_seconds: number +} + +export interface BackupAgentHealth { + enabled: boolean + reason: string + socket_path: string + agent?: BackupAgentInfo +} + +export interface DataManagementPostgresConfig { + host: string + port: number + user: string + password?: string + password_configured?: boolean + database: string + ssl_mode: 
string + container_name: string +} + +export interface DataManagementRedisConfig { + addr: string + username: string + password?: string + password_configured?: boolean + db: number + container_name: string +} + +export interface DataManagementS3Config { + enabled: boolean + endpoint: string + region: string + bucket: string + access_key_id: string + secret_access_key?: string + secret_access_key_configured?: boolean + prefix: string + force_path_style: boolean + use_ssl: boolean +} + +export interface DataManagementConfig { + source_mode: 'direct' | 'docker_exec' + backup_root: string + sqlite_path?: string + retention_days: number + keep_last: number + postgres: DataManagementPostgresConfig + redis: DataManagementRedisConfig + s3: DataManagementS3Config +} + +export interface TestS3Request { + endpoint: string + region: string + bucket: string + access_key_id: string + secret_access_key: string + prefix?: string + force_path_style?: boolean + use_ssl?: boolean +} + +export interface TestS3Response { + ok: boolean + message: string +} + +export interface CreateBackupJobRequest { + backup_type: BackupType + upload_to_s3?: boolean + idempotency_key?: string +} + +export interface CreateBackupJobResponse { + job_id: string + status: BackupJobStatus +} + +export interface BackupArtifactInfo { + local_path: string + size_bytes: number + sha256: string +} + +export interface BackupS3Info { + bucket: string + key: string + etag: string +} + +export interface BackupJob { + job_id: string + backup_type: BackupType + status: BackupJobStatus + triggered_by: string + started_at?: string + finished_at?: string + error_message?: string + artifact?: BackupArtifactInfo + s3?: BackupS3Info +} + +export interface ListBackupJobsRequest { + page_size?: number + page_token?: string + status?: BackupJobStatus + backup_type?: BackupType +} + +export interface ListBackupJobsResponse { + items: BackupJob[] + next_page_token?: string +} + +export async function getAgentHealth(): Promise { 
+ const { data } = await apiClient.get('/admin/data-management/agent/health') + return data +} + +export async function getConfig(): Promise { + const { data } = await apiClient.get('/admin/data-management/config') + return data +} + +export async function updateConfig(request: DataManagementConfig): Promise { + const { data } = await apiClient.put('/admin/data-management/config', request) + return data +} + +export async function testS3(request: TestS3Request): Promise { + const { data } = await apiClient.post('/admin/data-management/s3/test', request) + return data +} + +export async function createBackupJob(request: CreateBackupJobRequest): Promise { + const headers = request.idempotency_key + ? { 'X-Idempotency-Key': request.idempotency_key } + : undefined + + const { data } = await apiClient.post( + '/admin/data-management/backups', + request, + { headers } + ) + return data +} + +export async function listBackupJobs(request?: ListBackupJobsRequest): Promise { + const { data } = await apiClient.get('/admin/data-management/backups', { + params: request + }) + return data +} + +export async function getBackupJob(jobID: string): Promise { + const { data } = await apiClient.get(`/admin/data-management/backups/${jobID}`) + return data +} + +export const dataManagementAPI = { + getAgentHealth, + getConfig, + updateConfig, + testS3, + createBackupJob, + listBackupJobs, + getBackupJob +} + +export default dataManagementAPI diff --git a/frontend/src/api/admin/index.ts b/frontend/src/api/admin/index.ts index ffb9b1799..1a19fa000 100644 --- a/frontend/src/api/admin/index.ts +++ b/frontend/src/api/admin/index.ts @@ -20,6 +20,7 @@ import antigravityAPI from './antigravity' import userAttributesAPI from './userAttributes' import opsAPI from './ops' import errorPassthroughAPI from './errorPassthrough' +import dataManagementAPI from './dataManagement' /** * Unified admin API object for convenient access @@ -41,7 +42,8 @@ export const adminAPI = { antigravity: antigravityAPI, 
userAttributes: userAttributesAPI, ops: opsAPI, - errorPassthrough: errorPassthroughAPI + errorPassthrough: errorPassthroughAPI, + dataManagement: dataManagementAPI } export { @@ -61,7 +63,8 @@ export { antigravityAPI, userAttributesAPI, opsAPI, - errorPassthroughAPI + errorPassthroughAPI, + dataManagementAPI } export default adminAPI @@ -69,3 +72,4 @@ export default adminAPI // Re-export types used by components export type { BalanceHistoryItem } from './users' export type { ErrorPassthroughRule, CreateRuleRequest, UpdateRuleRequest } from './errorPassthrough' +export type { BackupAgentHealth, DataManagementConfig } from './dataManagement' diff --git a/frontend/src/components/layout/AppSidebar.vue b/frontend/src/components/layout/AppSidebar.vue index e5afde9c1..629f99deb 100644 --- a/frontend/src/components/layout/AppSidebar.vue +++ b/frontend/src/components/layout/AppSidebar.vue @@ -320,6 +320,36 @@ const ServerIcon = { ) } +const DatabaseIcon = { + render: () => + h( + 'svg', + { fill: 'none', viewBox: '0 0 24 24', stroke: 'currentColor', 'stroke-width': '1.5' }, + [ + h('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + d: 'M3.75 5.25C3.75 4.007 7.443 3 12 3s8.25 1.007 8.25 2.25S16.557 7.5 12 7.5 3.75 6.493 3.75 5.25z' + }), + h('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + d: 'M3.75 5.25v4.5C3.75 10.993 7.443 12 12 12s8.25-1.007 8.25-2.25v-4.5' + }), + h('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + d: 'M3.75 9.75v4.5c0 1.243 3.693 2.25 8.25 2.25s8.25-1.007 8.25-2.25v-4.5' + }), + h('path', { + 'stroke-linecap': 'round', + 'stroke-linejoin': 'round', + d: 'M3.75 14.25v4.5C3.75 19.993 7.443 21 12 21s8.25-1.007 8.25-2.25v-4.5' + }) + ] + ) +} + const BellIcon = { render: () => h( @@ -490,17 +520,19 @@ const adminNavItems = computed(() => { { path: '/admin/proxies', label: t('nav.proxies'), icon: ServerIcon }, { path: '/admin/redeem', label: t('nav.redeemCodes'), icon: TicketIcon, 
hideInSimpleMode: true }, { path: '/admin/promo-codes', label: t('nav.promoCodes'), icon: GiftIcon, hideInSimpleMode: true }, - { path: '/admin/usage', label: t('nav.usage'), icon: ChartIcon }, + { path: '/admin/usage', label: t('nav.usage'), icon: ChartIcon } ] // 简单模式下,在系统设置前插入 API密钥 if (authStore.isSimpleMode) { const filtered = baseItems.filter(item => !item.hideInSimpleMode) filtered.push({ path: '/keys', label: t('nav.apiKeys'), icon: KeyIcon }) + filtered.push({ path: '/admin/data-management', label: t('nav.dataManagement'), icon: DatabaseIcon }) filtered.push({ path: '/admin/settings', label: t('nav.settings'), icon: CogIcon }) return filtered } + baseItems.push({ path: '/admin/data-management', label: t('nav.dataManagement'), icon: DatabaseIcon }) baseItems.push({ path: '/admin/settings', label: t('nav.settings'), icon: CogIcon }) return baseItems }) diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 3c415989c..c4380f9a1 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -270,6 +270,7 @@ export default { redeemCodes: 'Redeem Codes', ops: 'Ops', promoCodes: 'Promo Codes', + dataManagement: 'Data Management', settings: 'Settings', myAccount: 'My Account', lightMode: 'Light Mode', @@ -839,6 +840,117 @@ export default { failedToLoad: 'Failed to load dashboard statistics' }, + dataManagement: { + title: 'Data Management', + description: 'Manage backup agent status, object storage settings, and backup jobs in one place', + agent: { + title: 'Backup Agent Status', + description: 'The system probes a fixed Unix socket and enables data management only when reachable.', + enabled: 'Backup agent is ready. Data management operations are available.', + disabled: 'Backup agent is unavailable. 
Only diagnostic information is available now.', + socketPath: 'Socket Path', + version: 'Version', + status: 'Status', + uptime: 'Uptime', + reasonLabel: 'Unavailable Reason', + reason: { + BACKUP_AGENT_SOCKET_MISSING: 'Backup socket file is missing', + BACKUP_AGENT_UNAVAILABLE: 'Backup agent is unreachable', + UNKNOWN: 'Unknown reason' + } + }, + sections: { + config: { + title: 'Backup Configuration', + description: 'Configure backup source, retention policy, and S3 settings.' + }, + s3: { + title: 'S3 Object Storage', + description: 'Configure and test uploads of backup artifacts to a standard S3-compatible storage.' + }, + backup: { + title: 'Backup Operations', + description: 'Trigger PostgreSQL, Redis, and full backup jobs.' + }, + history: { + title: 'Backup History', + description: 'Review backup job status, errors, and artifact metadata.' + } + }, + form: { + sourceMode: 'Source Mode', + backupRoot: 'Backup Root', + retentionDays: 'Retention Days', + keepLast: 'Keep Last Jobs', + uploadToS3: 'Upload to S3', + idempotencyKey: 'Idempotency Key (Optional)', + secretConfigured: 'Configured already, leave empty to keep unchanged', + postgres: { + title: 'PostgreSQL', + host: 'Host', + port: 'Port', + user: 'User', + password: 'Password', + database: 'Database', + sslMode: 'SSL Mode', + containerName: 'Container Name (docker_exec mode)' + }, + redis: { + title: 'Redis', + addr: 'Address (host:port)', + username: 'Username', + password: 'Password', + db: 'Database Index', + containerName: 'Container Name (docker_exec mode)' + }, + s3: { + enabled: 'Enable S3 Upload', + endpoint: 'Endpoint (Optional)', + region: 'Region', + bucket: 'Bucket', + accessKeyID: 'Access Key ID', + secretAccessKey: 'Secret Access Key', + prefix: 'Object Prefix', + forcePathStyle: 'Force Path Style', + useSSL: 'Use SSL' + } + }, + history: { + total: '{count} jobs', + empty: 'No backup jobs yet', + columns: { + jobID: 'Job ID', + type: 'Type', + status: 'Status', + triggeredBy: 'Triggered 
By', + finishedAt: 'Finished At', + artifact: 'Artifact', + error: 'Error' + }, + status: { + queued: 'Queued', + running: 'Running', + succeeded: 'Succeeded', + failed: 'Failed', + partial_succeeded: 'Partial Succeeded' + } + }, + actions: { + refresh: 'Refresh Status', + disabledHint: 'Start backupd first and ensure the socket is reachable.', + reloadConfig: 'Reload Config', + saveConfig: 'Save Config', + configSaved: 'Configuration saved', + testS3: 'Test S3 Connection', + s3TestOK: 'S3 connection test succeeded', + s3TestFailed: 'S3 connection test failed', + createBackup: 'Create Backup Job', + jobCreated: 'Backup job created: {jobID} ({status})', + refreshJobs: 'Refresh Jobs', + loadMore: 'Load More' + } + }, + // Users users: { title: 'User Management', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 770f9ca97..4593193ec 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -270,6 +270,7 @@ export default { redeemCodes: '兑换码', ops: '运维监控', promoCodes: '优惠码', + dataManagement: '数据管理', settings: '系统设置', myAccount: '我的账户', lightMode: '浅色模式', @@ -862,6 +863,117 @@ export default { failedToLoad: '加载仪表盘数据失败' }, + dataManagement: { + title: '数据管理', + description: '统一管理备份代理状态、对象存储配置和备份任务', + agent: { + title: '备份代理状态', + description: '系统会自动探测固定 Unix Socket,仅在可连通时启用数据管理功能。', + enabled: '备份代理已就绪,可继续进行数据管理操作。', + disabled: '备份代理不可用,当前仅可查看诊断信息。', + socketPath: 'Socket 路径', + version: '版本', + status: '状态', + uptime: '运行时长', + reasonLabel: '不可用原因', + reason: { + BACKUP_AGENT_SOCKET_MISSING: '未检测到备份 Socket 文件', + BACKUP_AGENT_UNAVAILABLE: '备份代理不可连通', + UNKNOWN: '未知原因' + } + }, + sections: { + config: { + title: '备份配置', + description: '配置备份源、保留策略与 S3 存储参数。' + }, + s3: { + title: 'S3 对象存储', + description: '配置并测试备份产物上传到标准 S3 对象存储。' + }, + backup: { + title: '备份操作', + description: '触发 PostgreSQL、Redis 与全量备份任务。' + }, + history: { + title: '备份历史', + description: '查看备份任务执行状态、错误与产物信息。' + } + }, + form: { + 
sourceMode: '源模式', + backupRoot: '备份根目录', + retentionDays: '保留天数', + keepLast: '至少保留最近任务数', + uploadToS3: '上传到 S3', + idempotencyKey: '幂等键(可选)', + secretConfigured: '已配置,留空不变', + postgres: { + title: 'PostgreSQL', + host: '主机', + port: '端口', + user: '用户名', + password: '密码', + database: '数据库', + sslMode: 'SSL 模式', + containerName: '容器名(docker_exec 模式)' + }, + redis: { + title: 'Redis', + addr: '地址(host:port)', + username: '用户名', + password: '密码', + db: '数据库编号', + containerName: '容器名(docker_exec 模式)' + }, + s3: { + enabled: '启用 S3 上传', + endpoint: 'Endpoint(可选)', + region: 'Region', + bucket: 'Bucket', + accessKeyID: 'Access Key ID', + secretAccessKey: 'Secret Access Key', + prefix: '对象前缀', + forcePathStyle: '强制 path-style', + useSSL: '使用 SSL' + } + }, + history: { + total: '共 {count} 条', + empty: '暂无备份任务', + columns: { + jobID: '任务 ID', + type: '类型', + status: '状态', + triggeredBy: '触发人', + finishedAt: '完成时间', + artifact: '产物', + error: '错误' + }, + status: { + queued: '排队中', + running: '执行中', + succeeded: '成功', + failed: '失败', + partial_succeeded: '部分成功' + } + }, + actions: { + refresh: '刷新状态', + disabledHint: '请先启动 backupd 并确认 Socket 可连通。', + reloadConfig: '加载配置', + saveConfig: '保存配置', + configSaved: '配置保存成功', + testS3: '测试 S3 连接', + s3TestOK: 'S3 连接测试成功', + s3TestFailed: 'S3 连接测试失败', + createBackup: '创建备份任务', + jobCreated: '备份任务已创建:{jobID}({status})', + refreshJobs: '刷新任务', + loadMore: '加载更多' + } + }, + // Users Management users: { title: '用户管理', diff --git a/frontend/src/router/index.ts b/frontend/src/router/index.ts index 1a67cac63..278636790 100644 --- a/frontend/src/router/index.ts +++ b/frontend/src/router/index.ts @@ -314,6 +314,18 @@ const routes: RouteRecordRaw[] = [ descriptionKey: 'admin.promo.description' } }, + { + path: '/admin/data-management', + name: 'AdminDataManagement', + component: () => import('@/views/admin/DataManagementView.vue'), + meta: { + requiresAuth: true, + requiresAdmin: true, + title: 'Data Management', + titleKey: 
'admin.dataManagement.title', + descriptionKey: 'admin.dataManagement.description' + } + }, { path: '/admin/settings', name: 'AdminSettings', diff --git a/frontend/src/views/admin/DataManagementView.vue b/frontend/src/views/admin/DataManagementView.vue new file mode 100644 index 000000000..ab38751ee --- /dev/null +++ b/frontend/src/views/admin/DataManagementView.vue @@ -0,0 +1,580 @@ + + + From ba76d3eb57d6d1af14fd4c874f8d192a4e93b4cc Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Tue, 24 Feb 2026 21:28:34 +0800 Subject: [PATCH 003/120] =?UTF-8?q?feat(data-management):=20=E6=94=AF?= =?UTF-8?q?=E6=8C=81PG/Redis=E5=A4=9A=E9=85=8D=E7=BD=AE=E7=AE=A1=E7=90=86?= =?UTF-8?q?=E4=B8=8E=E5=A4=87=E4=BB=BD=E4=BB=BB=E5=8A=A1=E6=8C=89=E9=85=8D?= =?UTF-8?q?=E7=BD=AE=E9=80=89=E6=8B=A9?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../backup/proto/backup/v1/backup.pb.go | 1956 ++++++++++++++--- .../backup/proto/backup/v1/backup.proto | 128 ++ .../backup/proto/backup/v1/backup_grpc.pb.go | 398 +++- .../handler/admin/data_management_handler.go | 298 +++ backend/internal/server/routes/admin.go | 10 + .../internal/service/data_management_grpc.go | 460 +++- .../service/data_management_grpc_test.go | 6 + backup/ent/backupjob.go | 35 +- backup/ent/backupjob/backupjob.go | 24 + backup/ent/backupjob/where.go | 240 ++ backup/ent/backupjob_create.go | 54 + backup/ent/backupjob_update.go | 156 ++ backup/ent/backups3config.go | 37 +- backup/ent/backups3config/backups3config.go | 30 + backup/ent/backups3config/where.go | 155 ++ backup/ent/backups3config_create.go | 75 + backup/ent/backups3config_query.go | 8 +- backup/ent/backups3config_update.go | 102 + backup/ent/backupsourceconfig.go | 37 +- .../backupsourceconfig/backupsourceconfig.go | 26 + backup/ent/backupsourceconfig/where.go | 155 ++ backup/ent/backupsourceconfig_create.go | 51 + backup/ent/backupsourceconfig_update.go | 102 + backup/ent/migrate/schema.go | 49 +- backup/ent/mutation.go | 
549 ++++- backup/ent/runtime.go | 46 +- backup/ent/schema/backup_job.go | 6 + backup/ent/schema/backup_s3_config.go | 11 + backup/ent/schema/backup_source_config.go | 6 +- backup/go.sum | 12 + backup/internal/executor/runner.go | 75 +- backup/internal/grpcserver/server.go | 469 +++- backup/internal/store/entstore/store.go | 1332 ++++++++++- backup/internal/store/entstore/store_test.go | 182 ++ backup/proto/backup/v1/backup.pb.go | 1888 +++++++++++++--- backup/proto/backup/v1/backup.proto | 128 ++ backup/proto/backup/v1/backup_grpc.pb.go | 394 +++- frontend/src/api/admin/dataManagement.ts | 151 ++ frontend/src/i18n/locales/en.ts | 64 +- frontend/src/i18n/locales/zh.ts | 64 +- .../src/views/admin/DataManagementView.vue | 1130 +++++++++- 41 files changed, 10230 insertions(+), 869 deletions(-) diff --git a/backend/internal/backup/proto/backup/v1/backup.pb.go b/backend/internal/backup/proto/backup/v1/backup.pb.go index 180cb669b..2ae5669a2 100644 --- a/backend/internal/backup/proto/backup/v1/backup.pb.go +++ b/backend/internal/backup/proto/backup/v1/backup.pb.go @@ -2,7 +2,7 @@ // versions: // protoc-gen-go v1.36.10 // protoc v6.32.1 -// source: internal/backup/proto/backup/v1/backup.proto +// source: proto/backup/v1/backup.proto package backupv1 @@ -29,7 +29,7 @@ type HealthRequest struct { func (x *HealthRequest) Reset() { *x = HealthRequest{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[0] + mi := &file_proto_backup_v1_backup_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -41,7 +41,7 @@ func (x *HealthRequest) String() string { func (*HealthRequest) ProtoMessage() {} func (x *HealthRequest) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[0] + mi := &file_proto_backup_v1_backup_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -54,7 +54,7 @@ func (x 
*HealthRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use HealthRequest.ProtoReflect.Descriptor instead. func (*HealthRequest) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{0} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{0} } type HealthResponse struct { @@ -68,7 +68,7 @@ type HealthResponse struct { func (x *HealthResponse) Reset() { *x = HealthResponse{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[1] + mi := &file_proto_backup_v1_backup_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -80,7 +80,7 @@ func (x *HealthResponse) String() string { func (*HealthResponse) ProtoMessage() {} func (x *HealthResponse) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[1] + mi := &file_proto_backup_v1_backup_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -93,7 +93,7 @@ func (x *HealthResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead. 
func (*HealthResponse) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{1} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{1} } func (x *HealthResponse) GetStatus() string { @@ -135,7 +135,7 @@ type SourceConfig struct { func (x *SourceConfig) Reset() { *x = SourceConfig{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[2] + mi := &file_proto_backup_v1_backup_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -147,7 +147,7 @@ func (x *SourceConfig) String() string { func (*SourceConfig) ProtoMessage() {} func (x *SourceConfig) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[2] + mi := &file_proto_backup_v1_backup_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -160,7 +160,7 @@ func (x *SourceConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use SourceConfig.ProtoReflect.Descriptor instead. 
func (*SourceConfig) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{2} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{2} } func (x *SourceConfig) GetHost() string { @@ -250,7 +250,7 @@ type S3Config struct { func (x *S3Config) Reset() { *x = S3Config{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[3] + mi := &file_proto_backup_v1_backup_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -262,7 +262,7 @@ func (x *S3Config) String() string { func (*S3Config) ProtoMessage() {} func (x *S3Config) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[3] + mi := &file_proto_backup_v1_backup_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -275,7 +275,7 @@ func (x *S3Config) ProtoReflect() protoreflect.Message { // Deprecated: Use S3Config.ProtoReflect.Descriptor instead. 
func (*S3Config) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{3} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{3} } func (x *S3Config) GetEnabled() bool { @@ -342,34 +342,1110 @@ func (x *S3Config) GetUseSsl() bool { } type BackupConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceMode string `protobuf:"bytes,1,opt,name=source_mode,json=sourceMode,proto3" json:"source_mode,omitempty"` + BackupRoot string `protobuf:"bytes,2,opt,name=backup_root,json=backupRoot,proto3" json:"backup_root,omitempty"` + SqlitePath string `protobuf:"bytes,3,opt,name=sqlite_path,json=sqlitePath,proto3" json:"sqlite_path,omitempty"` + RetentionDays int32 `protobuf:"varint,4,opt,name=retention_days,json=retentionDays,proto3" json:"retention_days,omitempty"` + KeepLast int32 `protobuf:"varint,5,opt,name=keep_last,json=keepLast,proto3" json:"keep_last,omitempty"` + Postgres *SourceConfig `protobuf:"bytes,6,opt,name=postgres,proto3" json:"postgres,omitempty"` + Redis *SourceConfig `protobuf:"bytes,7,opt,name=redis,proto3" json:"redis,omitempty"` + S3 *S3Config `protobuf:"bytes,8,opt,name=s3,proto3" json:"s3,omitempty"` + ActiveS3ProfileId string `protobuf:"bytes,9,opt,name=active_s3_profile_id,json=activeS3ProfileId,proto3" json:"active_s3_profile_id,omitempty"` + ActivePostgresProfileId string `protobuf:"bytes,10,opt,name=active_postgres_profile_id,json=activePostgresProfileId,proto3" json:"active_postgres_profile_id,omitempty"` + ActiveRedisProfileId string `protobuf:"bytes,11,opt,name=active_redis_profile_id,json=activeRedisProfileId,proto3" json:"active_redis_profile_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupConfig) Reset() { + *x = BackupConfig{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupConfig) 
String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupConfig) ProtoMessage() {} + +func (x *BackupConfig) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupConfig.ProtoReflect.Descriptor instead. +func (*BackupConfig) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{4} +} + +func (x *BackupConfig) GetSourceMode() string { + if x != nil { + return x.SourceMode + } + return "" +} + +func (x *BackupConfig) GetBackupRoot() string { + if x != nil { + return x.BackupRoot + } + return "" +} + +func (x *BackupConfig) GetSqlitePath() string { + if x != nil { + return x.SqlitePath + } + return "" +} + +func (x *BackupConfig) GetRetentionDays() int32 { + if x != nil { + return x.RetentionDays + } + return 0 +} + +func (x *BackupConfig) GetKeepLast() int32 { + if x != nil { + return x.KeepLast + } + return 0 +} + +func (x *BackupConfig) GetPostgres() *SourceConfig { + if x != nil { + return x.Postgres + } + return nil +} + +func (x *BackupConfig) GetRedis() *SourceConfig { + if x != nil { + return x.Redis + } + return nil +} + +func (x *BackupConfig) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +func (x *BackupConfig) GetActiveS3ProfileId() string { + if x != nil { + return x.ActiveS3ProfileId + } + return "" +} + +func (x *BackupConfig) GetActivePostgresProfileId() string { + if x != nil { + return x.ActivePostgresProfileId + } + return "" +} + +func (x *BackupConfig) GetActiveRedisProfileId() string { + if x != nil { + return x.ActiveRedisProfileId + } + return "" +} + +type GetConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func 
(x *GetConfigRequest) Reset() { + *x = GetConfigRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConfigRequest) ProtoMessage() {} + +func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConfigRequest.ProtoReflect.Descriptor instead. +func (*GetConfigRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{5} +} + +type GetConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConfigResponse) Reset() { + *x = GetConfigResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConfigResponse) ProtoMessage() {} + +func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConfigResponse.ProtoReflect.Descriptor instead. 
+func (*GetConfigResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{6} +} + +func (x *GetConfigResponse) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpdateConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateConfigRequest) Reset() { + *x = UpdateConfigRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateConfigRequest) ProtoMessage() {} + +func (x *UpdateConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateConfigRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateConfigRequest) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpdateConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateConfigResponse) Reset() { + *x = UpdateConfigResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateConfigResponse) ProtoMessage() {} + +func (x *UpdateConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateConfigResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdateConfigResponse) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type SourceProfile struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + IsActive bool `protobuf:"varint,4,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"` + Config *SourceConfig `protobuf:"bytes,5,opt,name=config,proto3" json:"config,omitempty"` + PasswordConfigured bool `protobuf:"varint,6,opt,name=password_configured,json=passwordConfigured,proto3" json:"password_configured,omitempty"` + CreatedAt string `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + UpdatedAt string `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceProfile) Reset() { + *x = SourceProfile{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceProfile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceProfile) ProtoMessage() {} + +func (x *SourceProfile) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceProfile.ProtoReflect.Descriptor instead. 
+func (*SourceProfile) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{9} +} + +func (x *SourceProfile) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *SourceProfile) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +func (x *SourceProfile) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SourceProfile) GetIsActive() bool { + if x != nil { + return x.IsActive + } + return false +} + +func (x *SourceProfile) GetConfig() *SourceConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *SourceProfile) GetPasswordConfigured() bool { + if x != nil { + return x.PasswordConfigured + } + return false +} + +func (x *SourceProfile) GetCreatedAt() string { + if x != nil { + return x.CreatedAt + } + return "" +} + +func (x *SourceProfile) GetUpdatedAt() string { + if x != nil { + return x.UpdatedAt + } + return "" +} + +type ListSourceProfilesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListSourceProfilesRequest) Reset() { + *x = ListSourceProfilesRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSourceProfilesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSourceProfilesRequest) ProtoMessage() {} + +func (x *ListSourceProfilesRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
ListSourceProfilesRequest.ProtoReflect.Descriptor instead. +func (*ListSourceProfilesRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{10} +} + +func (x *ListSourceProfilesRequest) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +type ListSourceProfilesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Items []*SourceProfile `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListSourceProfilesResponse) Reset() { + *x = ListSourceProfilesResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSourceProfilesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSourceProfilesResponse) ProtoMessage() {} + +func (x *ListSourceProfilesResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSourceProfilesResponse.ProtoReflect.Descriptor instead. 
+func (*ListSourceProfilesResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{11} +} + +func (x *ListSourceProfilesResponse) GetItems() []*SourceProfile { + if x != nil { + return x.Items + } + return nil +} + +type CreateSourceProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Config *SourceConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + SetActive bool `protobuf:"varint,5,opt,name=set_active,json=setActive,proto3" json:"set_active,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSourceProfileRequest) Reset() { + *x = CreateSourceProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSourceProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSourceProfileRequest) ProtoMessage() {} + +func (x *CreateSourceProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSourceProfileRequest.ProtoReflect.Descriptor instead. 
+func (*CreateSourceProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{12} +} + +func (x *CreateSourceProfileRequest) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *CreateSourceProfileRequest) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +func (x *CreateSourceProfileRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateSourceProfileRequest) GetConfig() *SourceConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *CreateSourceProfileRequest) GetSetActive() bool { + if x != nil { + return x.SetActive + } + return false +} + +type CreateSourceProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Profile *SourceProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSourceProfileResponse) Reset() { + *x = CreateSourceProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSourceProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSourceProfileResponse) ProtoMessage() {} + +func (x *CreateSourceProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSourceProfileResponse.ProtoReflect.Descriptor instead. 
+func (*CreateSourceProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{13} +} + +func (x *CreateSourceProfileResponse) GetProfile() *SourceProfile { + if x != nil { + return x.Profile + } + return nil +} + +type UpdateSourceProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Config *SourceConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateSourceProfileRequest) Reset() { + *x = UpdateSourceProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateSourceProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSourceProfileRequest) ProtoMessage() {} + +func (x *UpdateSourceProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSourceProfileRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateSourceProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{14} +} + +func (x *UpdateSourceProfileRequest) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *UpdateSourceProfileRequest) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +func (x *UpdateSourceProfileRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateSourceProfileRequest) GetConfig() *SourceConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpdateSourceProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Profile *SourceProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateSourceProfileResponse) Reset() { + *x = UpdateSourceProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateSourceProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSourceProfileResponse) ProtoMessage() {} + +func (x *UpdateSourceProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSourceProfileResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateSourceProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{15} +} + +func (x *UpdateSourceProfileResponse) GetProfile() *SourceProfile { + if x != nil { + return x.Profile + } + return nil +} + +type DeleteSourceProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteSourceProfileRequest) Reset() { + *x = DeleteSourceProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteSourceProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSourceProfileRequest) ProtoMessage() {} + +func (x *DeleteSourceProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteSourceProfileRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteSourceProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{16} +} + +func (x *DeleteSourceProfileRequest) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *DeleteSourceProfileRequest) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +type DeleteSourceProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteSourceProfileResponse) Reset() { + *x = DeleteSourceProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteSourceProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSourceProfileResponse) ProtoMessage() {} + +func (x *DeleteSourceProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteSourceProfileResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteSourceProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{17} +} + +type SetActiveSourceProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetActiveSourceProfileRequest) Reset() { + *x = SetActiveSourceProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetActiveSourceProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetActiveSourceProfileRequest) ProtoMessage() {} + +func (x *SetActiveSourceProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetActiveSourceProfileRequest.ProtoReflect.Descriptor instead. 
+func (*SetActiveSourceProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{18} +} + +func (x *SetActiveSourceProfileRequest) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *SetActiveSourceProfileRequest) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +type SetActiveSourceProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Profile *SourceProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetActiveSourceProfileResponse) Reset() { + *x = SetActiveSourceProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetActiveSourceProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetActiveSourceProfileResponse) ProtoMessage() {} + +func (x *SetActiveSourceProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetActiveSourceProfileResponse.ProtoReflect.Descriptor instead. 
+func (*SetActiveSourceProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{19} +} + +func (x *SetActiveSourceProfileResponse) GetProfile() *SourceProfile { + if x != nil { + return x.Profile + } + return nil +} + +type ValidateS3Request struct { + state protoimpl.MessageState `protogen:"open.v1"` + S3 *S3Config `protobuf:"bytes,1,opt,name=s3,proto3" json:"s3,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateS3Request) Reset() { + *x = ValidateS3Request{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateS3Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateS3Request) ProtoMessage() {} + +func (x *ValidateS3Request) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateS3Request.ProtoReflect.Descriptor instead. 
+func (*ValidateS3Request) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{20} +} + +func (x *ValidateS3Request) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +type ValidateS3Response struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateS3Response) Reset() { + *x = ValidateS3Response{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateS3Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateS3Response) ProtoMessage() {} + +func (x *ValidateS3Response) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateS3Response.ProtoReflect.Descriptor instead. 
+func (*ValidateS3Response) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{21} +} + +func (x *ValidateS3Response) GetOk() bool { + if x != nil { + return x.Ok + } + return false +} + +func (x *ValidateS3Response) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type S3Profile struct { + state protoimpl.MessageState `protogen:"open.v1"` + ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + IsActive bool `protobuf:"varint,3,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"` + S3 *S3Config `protobuf:"bytes,4,opt,name=s3,proto3" json:"s3,omitempty"` + SecretAccessKeyConfigured bool `protobuf:"varint,5,opt,name=secret_access_key_configured,json=secretAccessKeyConfigured,proto3" json:"secret_access_key_configured,omitempty"` + CreatedAt string `protobuf:"bytes,6,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + UpdatedAt string `protobuf:"bytes,7,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *S3Profile) Reset() { + *x = S3Profile{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *S3Profile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S3Profile) ProtoMessage() {} + +func (x *S3Profile) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S3Profile.ProtoReflect.Descriptor instead. 
+func (*S3Profile) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{22} +} + +func (x *S3Profile) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +func (x *S3Profile) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *S3Profile) GetIsActive() bool { + if x != nil { + return x.IsActive + } + return false +} + +func (x *S3Profile) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +func (x *S3Profile) GetSecretAccessKeyConfigured() bool { + if x != nil { + return x.SecretAccessKeyConfigured + } + return false +} + +func (x *S3Profile) GetCreatedAt() string { + if x != nil { + return x.CreatedAt + } + return "" +} + +func (x *S3Profile) GetUpdatedAt() string { + if x != nil { + return x.UpdatedAt + } + return "" +} + +type ListS3ProfilesRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - SourceMode string `protobuf:"bytes,1,opt,name=source_mode,json=sourceMode,proto3" json:"source_mode,omitempty"` - BackupRoot string `protobuf:"bytes,2,opt,name=backup_root,json=backupRoot,proto3" json:"backup_root,omitempty"` - SqlitePath string `protobuf:"bytes,3,opt,name=sqlite_path,json=sqlitePath,proto3" json:"sqlite_path,omitempty"` - RetentionDays int32 `protobuf:"varint,4,opt,name=retention_days,json=retentionDays,proto3" json:"retention_days,omitempty"` - KeepLast int32 `protobuf:"varint,5,opt,name=keep_last,json=keepLast,proto3" json:"keep_last,omitempty"` - Postgres *SourceConfig `protobuf:"bytes,6,opt,name=postgres,proto3" json:"postgres,omitempty"` - Redis *SourceConfig `protobuf:"bytes,7,opt,name=redis,proto3" json:"redis,omitempty"` - S3 *S3Config `protobuf:"bytes,8,opt,name=s3,proto3" json:"s3,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *BackupConfig) Reset() { - *x = BackupConfig{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[4] +func (x 
*ListS3ProfilesRequest) Reset() { + *x = ListS3ProfilesRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *BackupConfig) String() string { +func (x *ListS3ProfilesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*BackupConfig) ProtoMessage() {} +func (*ListS3ProfilesRequest) ProtoMessage() {} -func (x *BackupConfig) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[4] +func (x *ListS3ProfilesRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[23] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -380,88 +1456,145 @@ func (x *BackupConfig) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BackupConfig.ProtoReflect.Descriptor instead. -func (*BackupConfig) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{4} +// Deprecated: Use ListS3ProfilesRequest.ProtoReflect.Descriptor instead. 
+func (*ListS3ProfilesRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{23} } -func (x *BackupConfig) GetSourceMode() string { - if x != nil { - return x.SourceMode - } - return "" +type ListS3ProfilesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Items []*S3Profile `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *BackupConfig) GetBackupRoot() string { +func (x *ListS3ProfilesResponse) Reset() { + *x = ListS3ProfilesResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListS3ProfilesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListS3ProfilesResponse) ProtoMessage() {} + +func (x *ListS3ProfilesResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[24] if x != nil { - return x.BackupRoot + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) } -func (x *BackupConfig) GetSqlitePath() string { +// Deprecated: Use ListS3ProfilesResponse.ProtoReflect.Descriptor instead. 
+func (*ListS3ProfilesResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{24} +} + +func (x *ListS3ProfilesResponse) GetItems() []*S3Profile { if x != nil { - return x.SqlitePath + return x.Items } - return "" + return nil } -func (x *BackupConfig) GetRetentionDays() int32 { +type CreateS3ProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + S3 *S3Config `protobuf:"bytes,3,opt,name=s3,proto3" json:"s3,omitempty"` + SetActive bool `protobuf:"varint,4,opt,name=set_active,json=setActive,proto3" json:"set_active,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateS3ProfileRequest) Reset() { + *x = CreateS3ProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[25] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateS3ProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateS3ProfileRequest) ProtoMessage() {} + +func (x *CreateS3ProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[25] if x != nil { - return x.RetentionDays + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return 0 + return mi.MessageOf(x) } -func (x *BackupConfig) GetKeepLast() int32 { +// Deprecated: Use CreateS3ProfileRequest.ProtoReflect.Descriptor instead. 
+func (*CreateS3ProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{25} +} + +func (x *CreateS3ProfileRequest) GetProfileId() string { if x != nil { - return x.KeepLast + return x.ProfileId } - return 0 + return "" } -func (x *BackupConfig) GetPostgres() *SourceConfig { +func (x *CreateS3ProfileRequest) GetName() string { if x != nil { - return x.Postgres + return x.Name } - return nil + return "" } -func (x *BackupConfig) GetRedis() *SourceConfig { +func (x *CreateS3ProfileRequest) GetS3() *S3Config { if x != nil { - return x.Redis + return x.S3 } return nil } -func (x *BackupConfig) GetS3() *S3Config { +func (x *CreateS3ProfileRequest) GetSetActive() bool { if x != nil { - return x.S3 + return x.SetActive } - return nil + return false } -type GetConfigRequest struct { +type CreateS3ProfileResponse struct { state protoimpl.MessageState `protogen:"open.v1"` + Profile *S3Profile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *GetConfigRequest) Reset() { - *x = GetConfigRequest{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[5] +func (x *CreateS3ProfileResponse) Reset() { + *x = CreateS3ProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *GetConfigRequest) String() string { +func (x *CreateS3ProfileResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetConfigRequest) ProtoMessage() {} +func (*CreateS3ProfileResponse) ProtoMessage() {} -func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[5] +func (x *CreateS3ProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[26] if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -472,33 +1605,42 @@ func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetConfigRequest.ProtoReflect.Descriptor instead. -func (*GetConfigRequest) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{5} +// Deprecated: Use CreateS3ProfileResponse.ProtoReflect.Descriptor instead. +func (*CreateS3ProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{26} } -type GetConfigResponse struct { +func (x *CreateS3ProfileResponse) GetProfile() *S3Profile { + if x != nil { + return x.Profile + } + return nil +} + +type UpdateS3ProfileRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + S3 *S3Config `protobuf:"bytes,3,opt,name=s3,proto3" json:"s3,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *GetConfigResponse) Reset() { - *x = GetConfigResponse{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[6] +func (x *UpdateS3ProfileRequest) Reset() { + *x = UpdateS3ProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *GetConfigResponse) String() string { +func (x *UpdateS3ProfileRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetConfigResponse) ProtoMessage() {} +func (*UpdateS3ProfileRequest) ProtoMessage() {} -func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { - mi := 
&file_internal_backup_proto_backup_v1_backup_proto_msgTypes[6] +func (x *UpdateS3ProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -509,40 +1651,54 @@ func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetConfigResponse.ProtoReflect.Descriptor instead. -func (*GetConfigResponse) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{6} +// Deprecated: Use UpdateS3ProfileRequest.ProtoReflect.Descriptor instead. +func (*UpdateS3ProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{27} } -func (x *GetConfigResponse) GetConfig() *BackupConfig { +func (x *UpdateS3ProfileRequest) GetProfileId() string { if x != nil { - return x.Config + return x.ProfileId + } + return "" +} + +func (x *UpdateS3ProfileRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateS3ProfileRequest) GetS3() *S3Config { + if x != nil { + return x.S3 } return nil } -type UpdateConfigRequest struct { +type UpdateS3ProfileResponse struct { state protoimpl.MessageState `protogen:"open.v1"` - Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Profile *S3Profile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *UpdateConfigRequest) Reset() { - *x = UpdateConfigRequest{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[7] +func (x *UpdateS3ProfileResponse) Reset() { + *x = UpdateS3ProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *UpdateConfigRequest) String() 
string { +func (x *UpdateS3ProfileResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateConfigRequest) ProtoMessage() {} +func (*UpdateS3ProfileResponse) ProtoMessage() {} -func (x *UpdateConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[7] +func (x *UpdateS3ProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -553,40 +1709,40 @@ func (x *UpdateConfigRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateConfigRequest.ProtoReflect.Descriptor instead. -func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{7} +// Deprecated: Use UpdateS3ProfileResponse.ProtoReflect.Descriptor instead. +func (*UpdateS3ProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{28} } -func (x *UpdateConfigRequest) GetConfig() *BackupConfig { +func (x *UpdateS3ProfileResponse) GetProfile() *S3Profile { if x != nil { - return x.Config + return x.Profile } return nil } -type UpdateConfigResponse struct { +type DeleteS3ProfileRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *UpdateConfigResponse) Reset() { - *x = UpdateConfigResponse{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[8] +func (x *DeleteS3ProfileRequest) Reset() { + *x = DeleteS3ProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[29] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *UpdateConfigResponse) String() string { +func (x *DeleteS3ProfileRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateConfigResponse) ProtoMessage() {} +func (*DeleteS3ProfileRequest) ProtoMessage() {} -func (x *UpdateConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[8] +func (x *DeleteS3ProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -597,40 +1753,39 @@ func (x *UpdateConfigResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateConfigResponse.ProtoReflect.Descriptor instead. -func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{8} +// Deprecated: Use DeleteS3ProfileRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteS3ProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{29} } -func (x *UpdateConfigResponse) GetConfig() *BackupConfig { +func (x *DeleteS3ProfileRequest) GetProfileId() string { if x != nil { - return x.Config + return x.ProfileId } - return nil + return "" } -type ValidateS3Request struct { +type DeleteS3ProfileResponse struct { state protoimpl.MessageState `protogen:"open.v1"` - S3 *S3Config `protobuf:"bytes,1,opt,name=s3,proto3" json:"s3,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *ValidateS3Request) Reset() { - *x = ValidateS3Request{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[9] +func (x *DeleteS3ProfileResponse) Reset() { + *x = DeleteS3ProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *ValidateS3Request) String() string { +func (x *DeleteS3ProfileResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateS3Request) ProtoMessage() {} +func (*DeleteS3ProfileResponse) ProtoMessage() {} -func (x *ValidateS3Request) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[9] +func (x *DeleteS3ProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -641,41 +1796,33 @@ func (x *ValidateS3Request) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateS3Request.ProtoReflect.Descriptor instead. 
-func (*ValidateS3Request) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{9} -} - -func (x *ValidateS3Request) GetS3() *S3Config { - if x != nil { - return x.S3 - } - return nil +// Deprecated: Use DeleteS3ProfileResponse.ProtoReflect.Descriptor instead. +func (*DeleteS3ProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{30} } -type ValidateS3Response struct { +type SetActiveS3ProfileRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *ValidateS3Response) Reset() { - *x = ValidateS3Response{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[10] +func (x *SetActiveS3ProfileRequest) Reset() { + *x = SetActiveS3ProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *ValidateS3Response) String() string { +func (x *SetActiveS3ProfileRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateS3Response) ProtoMessage() {} +func (*SetActiveS3ProfileRequest) ProtoMessage() {} -func (x *ValidateS3Response) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[10] +func (x *SetActiveS3ProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -686,38 +1833,78 @@ func (x *ValidateS3Response) ProtoReflect() protoreflect.Message { return 
mi.MessageOf(x) } -// Deprecated: Use ValidateS3Response.ProtoReflect.Descriptor instead. -func (*ValidateS3Response) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{10} +// Deprecated: Use SetActiveS3ProfileRequest.ProtoReflect.Descriptor instead. +func (*SetActiveS3ProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{31} } -func (x *ValidateS3Response) GetOk() bool { +func (x *SetActiveS3ProfileRequest) GetProfileId() string { if x != nil { - return x.Ok + return x.ProfileId } - return false + return "" } -func (x *ValidateS3Response) GetMessage() string { +type SetActiveS3ProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Profile *S3Profile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetActiveS3ProfileResponse) Reset() { + *x = SetActiveS3ProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[32] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetActiveS3ProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetActiveS3ProfileResponse) ProtoMessage() {} + +func (x *SetActiveS3ProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[32] if x != nil { - return x.Message + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return "" + return mi.MessageOf(x) +} + +// Deprecated: Use SetActiveS3ProfileResponse.ProtoReflect.Descriptor instead. 
+func (*SetActiveS3ProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{32} +} + +func (x *SetActiveS3ProfileResponse) GetProfile() *S3Profile { + if x != nil { + return x.Profile + } + return nil } type CreateBackupJobRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - BackupType string `protobuf:"bytes,1,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` - UploadToS3 bool `protobuf:"varint,2,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` - TriggeredBy string `protobuf:"bytes,3,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` - IdempotencyKey string `protobuf:"bytes,4,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + BackupType string `protobuf:"bytes,1,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` + UploadToS3 bool `protobuf:"varint,2,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` + TriggeredBy string `protobuf:"bytes,3,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` + IdempotencyKey string `protobuf:"bytes,4,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` + S3ProfileId string `protobuf:"bytes,5,opt,name=s3_profile_id,json=s3ProfileId,proto3" json:"s3_profile_id,omitempty"` + PostgresProfileId string `protobuf:"bytes,6,opt,name=postgres_profile_id,json=postgresProfileId,proto3" json:"postgres_profile_id,omitempty"` + RedisProfileId string `protobuf:"bytes,7,opt,name=redis_profile_id,json=redisProfileId,proto3" json:"redis_profile_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CreateBackupJobRequest) Reset() { *x = CreateBackupJobRequest{} - mi := 
&file_internal_backup_proto_backup_v1_backup_proto_msgTypes[11] + mi := &file_proto_backup_v1_backup_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -729,7 +1916,7 @@ func (x *CreateBackupJobRequest) String() string { func (*CreateBackupJobRequest) ProtoMessage() {} func (x *CreateBackupJobRequest) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[11] + mi := &file_proto_backup_v1_backup_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -742,7 +1929,7 @@ func (x *CreateBackupJobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupJobRequest.ProtoReflect.Descriptor instead. func (*CreateBackupJobRequest) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{11} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{33} } func (x *CreateBackupJobRequest) GetBackupType() string { @@ -773,6 +1960,27 @@ func (x *CreateBackupJobRequest) GetIdempotencyKey() string { return "" } +func (x *CreateBackupJobRequest) GetS3ProfileId() string { + if x != nil { + return x.S3ProfileId + } + return "" +} + +func (x *CreateBackupJobRequest) GetPostgresProfileId() string { + if x != nil { + return x.PostgresProfileId + } + return "" +} + +func (x *CreateBackupJobRequest) GetRedisProfileId() string { + if x != nil { + return x.RedisProfileId + } + return "" +} + type BackupArtifact struct { state protoimpl.MessageState `protogen:"open.v1"` LocalPath string `protobuf:"bytes,1,opt,name=local_path,json=localPath,proto3" json:"local_path,omitempty"` @@ -784,7 +1992,7 @@ type BackupArtifact struct { func (x *BackupArtifact) Reset() { *x = BackupArtifact{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[12] + mi := &file_proto_backup_v1_backup_proto_msgTypes[34] ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -796,7 +2004,7 @@ func (x *BackupArtifact) String() string { func (*BackupArtifact) ProtoMessage() {} func (x *BackupArtifact) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[12] + mi := &file_proto_backup_v1_backup_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -809,7 +2017,7 @@ func (x *BackupArtifact) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupArtifact.ProtoReflect.Descriptor instead. func (*BackupArtifact) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{12} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{34} } func (x *BackupArtifact) GetLocalPath() string { @@ -844,7 +2052,7 @@ type BackupS3Object struct { func (x *BackupS3Object) Reset() { *x = BackupS3Object{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[13] + mi := &file_proto_backup_v1_backup_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -856,7 +2064,7 @@ func (x *BackupS3Object) String() string { func (*BackupS3Object) ProtoMessage() {} func (x *BackupS3Object) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[13] + mi := &file_proto_backup_v1_backup_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -869,7 +2077,7 @@ func (x *BackupS3Object) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupS3Object.ProtoReflect.Descriptor instead. 
func (*BackupS3Object) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{13} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{35} } func (x *BackupS3Object) GetBucket() string { @@ -894,25 +2102,28 @@ func (x *BackupS3Object) GetEtag() string { } type BackupJob struct { - state protoimpl.MessageState `protogen:"open.v1"` - JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` - BackupType string `protobuf:"bytes,2,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - TriggeredBy string `protobuf:"bytes,4,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` - IdempotencyKey string `protobuf:"bytes,5,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` - UploadToS3 bool `protobuf:"varint,6,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` - StartedAt string `protobuf:"bytes,7,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` - FinishedAt string `protobuf:"bytes,8,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"` - ErrorMessage string `protobuf:"bytes,9,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` - Artifact *BackupArtifact `protobuf:"bytes,10,opt,name=artifact,proto3" json:"artifact,omitempty"` - S3Object *BackupS3Object `protobuf:"bytes,11,opt,name=s3_object,json=s3Object,proto3" json:"s3_object,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + BackupType string `protobuf:"bytes,2,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` + Status string 
`protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` + TriggeredBy string `protobuf:"bytes,4,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` + IdempotencyKey string `protobuf:"bytes,5,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` + UploadToS3 bool `protobuf:"varint,6,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` + StartedAt string `protobuf:"bytes,7,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + FinishedAt string `protobuf:"bytes,8,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"` + ErrorMessage string `protobuf:"bytes,9,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + Artifact *BackupArtifact `protobuf:"bytes,10,opt,name=artifact,proto3" json:"artifact,omitempty"` + S3Object *BackupS3Object `protobuf:"bytes,11,opt,name=s3_object,json=s3Object,proto3" json:"s3_object,omitempty"` + S3ProfileId string `protobuf:"bytes,12,opt,name=s3_profile_id,json=s3ProfileId,proto3" json:"s3_profile_id,omitempty"` + PostgresProfileId string `protobuf:"bytes,13,opt,name=postgres_profile_id,json=postgresProfileId,proto3" json:"postgres_profile_id,omitempty"` + RedisProfileId string `protobuf:"bytes,14,opt,name=redis_profile_id,json=redisProfileId,proto3" json:"redis_profile_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *BackupJob) Reset() { *x = BackupJob{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[14] + mi := &file_proto_backup_v1_backup_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -924,7 +2135,7 @@ func (x *BackupJob) String() string { func (*BackupJob) ProtoMessage() {} func (x *BackupJob) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[14] + mi := 
&file_proto_backup_v1_backup_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -937,7 +2148,7 @@ func (x *BackupJob) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupJob.ProtoReflect.Descriptor instead. func (*BackupJob) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{14} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{36} } func (x *BackupJob) GetJobId() string { @@ -1017,6 +2228,27 @@ func (x *BackupJob) GetS3Object() *BackupS3Object { return nil } +func (x *BackupJob) GetS3ProfileId() string { + if x != nil { + return x.S3ProfileId + } + return "" +} + +func (x *BackupJob) GetPostgresProfileId() string { + if x != nil { + return x.PostgresProfileId + } + return "" +} + +func (x *BackupJob) GetRedisProfileId() string { + if x != nil { + return x.RedisProfileId + } + return "" +} + type CreateBackupJobResponse struct { state protoimpl.MessageState `protogen:"open.v1"` Job *BackupJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` @@ -1026,7 +2258,7 @@ type CreateBackupJobResponse struct { func (x *CreateBackupJobResponse) Reset() { *x = CreateBackupJobResponse{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[15] + mi := &file_proto_backup_v1_backup_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1038,7 +2270,7 @@ func (x *CreateBackupJobResponse) String() string { func (*CreateBackupJobResponse) ProtoMessage() {} func (x *CreateBackupJobResponse) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[15] + mi := &file_proto_backup_v1_backup_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1051,7 +2283,7 @@ func (x *CreateBackupJobResponse) ProtoReflect() protoreflect.Message { // 
Deprecated: Use CreateBackupJobResponse.ProtoReflect.Descriptor instead. func (*CreateBackupJobResponse) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{15} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{37} } func (x *CreateBackupJobResponse) GetJob() *BackupJob { @@ -1073,7 +2305,7 @@ type ListBackupJobsRequest struct { func (x *ListBackupJobsRequest) Reset() { *x = ListBackupJobsRequest{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[16] + mi := &file_proto_backup_v1_backup_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1085,7 +2317,7 @@ func (x *ListBackupJobsRequest) String() string { func (*ListBackupJobsRequest) ProtoMessage() {} func (x *ListBackupJobsRequest) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[16] + mi := &file_proto_backup_v1_backup_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1098,7 +2330,7 @@ func (x *ListBackupJobsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListBackupJobsRequest.ProtoReflect.Descriptor instead. 
func (*ListBackupJobsRequest) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{16} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{38} } func (x *ListBackupJobsRequest) GetPageSize() int32 { @@ -1139,7 +2371,7 @@ type ListBackupJobsResponse struct { func (x *ListBackupJobsResponse) Reset() { *x = ListBackupJobsResponse{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[17] + mi := &file_proto_backup_v1_backup_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1151,7 +2383,7 @@ func (x *ListBackupJobsResponse) String() string { func (*ListBackupJobsResponse) ProtoMessage() {} func (x *ListBackupJobsResponse) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[17] + mi := &file_proto_backup_v1_backup_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1164,7 +2396,7 @@ func (x *ListBackupJobsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListBackupJobsResponse.ProtoReflect.Descriptor instead. 
func (*ListBackupJobsResponse) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{17} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{39} } func (x *ListBackupJobsResponse) GetItems() []*BackupJob { @@ -1190,7 +2422,7 @@ type GetBackupJobRequest struct { func (x *GetBackupJobRequest) Reset() { *x = GetBackupJobRequest{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[18] + mi := &file_proto_backup_v1_backup_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1202,7 +2434,7 @@ func (x *GetBackupJobRequest) String() string { func (*GetBackupJobRequest) ProtoMessage() {} func (x *GetBackupJobRequest) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[18] + mi := &file_proto_backup_v1_backup_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1215,7 +2447,7 @@ func (x *GetBackupJobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBackupJobRequest.ProtoReflect.Descriptor instead. 
func (*GetBackupJobRequest) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{18} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{40} } func (x *GetBackupJobRequest) GetJobId() string { @@ -1234,7 +2466,7 @@ type GetBackupJobResponse struct { func (x *GetBackupJobResponse) Reset() { *x = GetBackupJobResponse{} - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[19] + mi := &file_proto_backup_v1_backup_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1246,7 +2478,7 @@ func (x *GetBackupJobResponse) String() string { func (*GetBackupJobResponse) ProtoMessage() {} func (x *GetBackupJobResponse) ProtoReflect() protoreflect.Message { - mi := &file_internal_backup_proto_backup_v1_backup_proto_msgTypes[19] + mi := &file_proto_backup_v1_backup_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1259,7 +2491,7 @@ func (x *GetBackupJobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBackupJobResponse.ProtoReflect.Descriptor instead. 
func (*GetBackupJobResponse) Descriptor() ([]byte, []int) { - return file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP(), []int{19} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{41} } func (x *GetBackupJobResponse) GetJob() *BackupJob { @@ -1269,11 +2501,11 @@ func (x *GetBackupJobResponse) GetJob() *BackupJob { return nil } -var File_internal_backup_proto_backup_v1_backup_proto protoreflect.FileDescriptor +var File_proto_backup_v1_backup_proto protoreflect.FileDescriptor -const file_internal_backup_proto_backup_v1_backup_proto_rawDesc = "" + +const file_proto_backup_v1_backup_proto_rawDesc = "" + "\n" + - ",internal/backup/proto/backup/v1/backup.proto\x12\tbackup.v1\"\x0f\n" + + "\x1cproto/backup/v1/backup.proto\x12\tbackup.v1\"\x0f\n" + "\rHealthRequest\"i\n" + "\x0eHealthResponse\x12\x16\n" + "\x06status\x18\x01 \x01(\tR\x06status\x12\x18\n" + @@ -1300,7 +2532,7 @@ const file_internal_backup_proto_backup_v1_backup_proto_rawDesc = "" + "\x11secret_access_key\x18\x06 \x01(\tR\x0fsecretAccessKey\x12\x16\n" + "\x06prefix\x18\a \x01(\tR\x06prefix\x12(\n" + "\x10force_path_style\x18\b \x01(\bR\x0eforcePathStyle\x12\x17\n" + - "\ause_ssl\x18\t \x01(\bR\x06useSsl\"\xbe\x02\n" + + "\ause_ssl\x18\t \x01(\bR\x06useSsl\"\xe3\x03\n" + "\fBackupConfig\x12\x1f\n" + "\vsource_mode\x18\x01 \x01(\tR\n" + "sourceMode\x12\x1f\n" + @@ -1312,26 +2544,123 @@ const file_internal_backup_proto_backup_v1_backup_proto_rawDesc = "" + "\tkeep_last\x18\x05 \x01(\x05R\bkeepLast\x123\n" + "\bpostgres\x18\x06 \x01(\v2\x17.backup.v1.SourceConfigR\bpostgres\x12-\n" + "\x05redis\x18\a \x01(\v2\x17.backup.v1.SourceConfigR\x05redis\x12#\n" + - "\x02s3\x18\b \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\"\x12\n" + + "\x02s3\x18\b \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\x12/\n" + + "\x14active_s3_profile_id\x18\t \x01(\tR\x11activeS3ProfileId\x12;\n" + + "\x1aactive_postgres_profile_id\x18\n" + + " \x01(\tR\x17activePostgresProfileId\x125\n" + + "\x17active_redis_profile_id\x18\v 
\x01(\tR\x14activeRedisProfileId\"\x12\n" + "\x10GetConfigRequest\"D\n" + "\x11GetConfigResponse\x12/\n" + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"F\n" + "\x13UpdateConfigRequest\x12/\n" + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"G\n" + "\x14UpdateConfigResponse\x12/\n" + - "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"8\n" + + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"\xa0\x02\n" + + "\rSourceProfile\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\x12\x1d\n" + + "\n" + + "profile_id\x18\x02 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12\x1b\n" + + "\tis_active\x18\x04 \x01(\bR\bisActive\x12/\n" + + "\x06config\x18\x05 \x01(\v2\x17.backup.v1.SourceConfigR\x06config\x12/\n" + + "\x13password_configured\x18\x06 \x01(\bR\x12passwordConfigured\x12\x1d\n" + + "\n" + + "created_at\x18\a \x01(\tR\tcreatedAt\x12\x1d\n" + + "\n" + + "updated_at\x18\b \x01(\tR\tupdatedAt\"<\n" + + "\x19ListSourceProfilesRequest\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\"L\n" + + "\x1aListSourceProfilesResponse\x12.\n" + + "\x05items\x18\x01 \x03(\v2\x18.backup.v1.SourceProfileR\x05items\"\xc0\x01\n" + + "\x1aCreateSourceProfileRequest\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\x12\x1d\n" + + "\n" + + "profile_id\x18\x02 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12/\n" + + "\x06config\x18\x04 \x01(\v2\x17.backup.v1.SourceConfigR\x06config\x12\x1d\n" + + "\n" + + "set_active\x18\x05 \x01(\bR\tsetActive\"Q\n" + + "\x1bCreateSourceProfileResponse\x122\n" + + "\aprofile\x18\x01 \x01(\v2\x18.backup.v1.SourceProfileR\aprofile\"\xa1\x01\n" + + "\x1aUpdateSourceProfileRequest\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\x12\x1d\n" + + "\n" + + "profile_id\x18\x02 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12/\n" + + 
"\x06config\x18\x04 \x01(\v2\x17.backup.v1.SourceConfigR\x06config\"Q\n" + + "\x1bUpdateSourceProfileResponse\x122\n" + + "\aprofile\x18\x01 \x01(\v2\x18.backup.v1.SourceProfileR\aprofile\"\\\n" + + "\x1aDeleteSourceProfileRequest\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\x12\x1d\n" + + "\n" + + "profile_id\x18\x02 \x01(\tR\tprofileId\"\x1d\n" + + "\x1bDeleteSourceProfileResponse\"_\n" + + "\x1dSetActiveSourceProfileRequest\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\x12\x1d\n" + + "\n" + + "profile_id\x18\x02 \x01(\tR\tprofileId\"T\n" + + "\x1eSetActiveSourceProfileResponse\x122\n" + + "\aprofile\x18\x01 \x01(\v2\x18.backup.v1.SourceProfileR\aprofile\"8\n" + "\x11ValidateS3Request\x12#\n" + "\x02s3\x18\x01 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\">\n" + "\x12ValidateS3Response\x12\x0e\n" + "\x02ok\x18\x01 \x01(\bR\x02ok\x12\x18\n" + - "\amessage\x18\x02 \x01(\tR\amessage\"\xa7\x01\n" + + "\amessage\x18\x02 \x01(\tR\amessage\"\xff\x01\n" + + "\tS3Profile\x12\x1d\n" + + "\n" + + "profile_id\x18\x01 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12\x1b\n" + + "\tis_active\x18\x03 \x01(\bR\bisActive\x12#\n" + + "\x02s3\x18\x04 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\x12?\n" + + "\x1csecret_access_key_configured\x18\x05 \x01(\bR\x19secretAccessKeyConfigured\x12\x1d\n" + + "\n" + + "created_at\x18\x06 \x01(\tR\tcreatedAt\x12\x1d\n" + + "\n" + + "updated_at\x18\a \x01(\tR\tupdatedAt\"\x17\n" + + "\x15ListS3ProfilesRequest\"D\n" + + "\x16ListS3ProfilesResponse\x12*\n" + + "\x05items\x18\x01 \x03(\v2\x14.backup.v1.S3ProfileR\x05items\"\x8f\x01\n" + + "\x16CreateS3ProfileRequest\x12\x1d\n" + + "\n" + + "profile_id\x18\x01 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12#\n" + + "\x02s3\x18\x03 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\x12\x1d\n" + + "\n" + + "set_active\x18\x04 \x01(\bR\tsetActive\"I\n" + + "\x17CreateS3ProfileResponse\x12.\n" + + "\aprofile\x18\x01 
\x01(\v2\x14.backup.v1.S3ProfileR\aprofile\"p\n" + + "\x16UpdateS3ProfileRequest\x12\x1d\n" + + "\n" + + "profile_id\x18\x01 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12#\n" + + "\x02s3\x18\x03 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\"I\n" + + "\x17UpdateS3ProfileResponse\x12.\n" + + "\aprofile\x18\x01 \x01(\v2\x14.backup.v1.S3ProfileR\aprofile\"7\n" + + "\x16DeleteS3ProfileRequest\x12\x1d\n" + + "\n" + + "profile_id\x18\x01 \x01(\tR\tprofileId\"\x19\n" + + "\x17DeleteS3ProfileResponse\":\n" + + "\x19SetActiveS3ProfileRequest\x12\x1d\n" + + "\n" + + "profile_id\x18\x01 \x01(\tR\tprofileId\"L\n" + + "\x1aSetActiveS3ProfileResponse\x12.\n" + + "\aprofile\x18\x01 \x01(\v2\x14.backup.v1.S3ProfileR\aprofile\"\xa5\x02\n" + "\x16CreateBackupJobRequest\x12\x1f\n" + "\vbackup_type\x18\x01 \x01(\tR\n" + "backupType\x12 \n" + "\fupload_to_s3\x18\x02 \x01(\bR\n" + "uploadToS3\x12!\n" + "\ftriggered_by\x18\x03 \x01(\tR\vtriggeredBy\x12'\n" + - "\x0fidempotency_key\x18\x04 \x01(\tR\x0eidempotencyKey\"f\n" + + "\x0fidempotency_key\x18\x04 \x01(\tR\x0eidempotencyKey\x12\"\n" + + "\rs3_profile_id\x18\x05 \x01(\tR\vs3ProfileId\x12.\n" + + "\x13postgres_profile_id\x18\x06 \x01(\tR\x11postgresProfileId\x12(\n" + + "\x10redis_profile_id\x18\a \x01(\tR\x0eredisProfileId\"f\n" + "\x0eBackupArtifact\x12\x1d\n" + "\n" + "local_path\x18\x01 \x01(\tR\tlocalPath\x12\x1d\n" + @@ -1341,7 +2670,7 @@ const file_internal_backup_proto_backup_v1_backup_proto_rawDesc = "" + "\x0eBackupS3Object\x12\x16\n" + "\x06bucket\x18\x01 \x01(\tR\x06bucket\x12\x10\n" + "\x03key\x18\x02 \x01(\tR\x03key\x12\x12\n" + - "\x04etag\x18\x03 \x01(\tR\x04etag\"\x9d\x03\n" + + "\x04etag\x18\x03 \x01(\tR\x04etag\"\x9b\x04\n" + "\tBackupJob\x12\x15\n" + "\x06job_id\x18\x01 \x01(\tR\x05jobId\x12\x1f\n" + "\vbackup_type\x18\x02 \x01(\tR\n" + @@ -1358,7 +2687,10 @@ const file_internal_backup_proto_backup_v1_backup_proto_rawDesc = "" + "\rerror_message\x18\t \x01(\tR\ferrorMessage\x125\n" + 
"\bartifact\x18\n" + " \x01(\v2\x19.backup.v1.BackupArtifactR\bartifact\x126\n" + - "\ts3_object\x18\v \x01(\v2\x19.backup.v1.BackupS3ObjectR\bs3Object\"A\n" + + "\ts3_object\x18\v \x01(\v2\x19.backup.v1.BackupS3ObjectR\bs3Object\x12\"\n" + + "\rs3_profile_id\x18\f \x01(\tR\vs3ProfileId\x12.\n" + + "\x13postgres_profile_id\x18\r \x01(\tR\x11postgresProfileId\x12(\n" + + "\x10redis_profile_id\x18\x0e \x01(\tR\x0eredisProfileId\"A\n" + "\x17CreateBackupJobResponse\x12&\n" + "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job\"\x8c\x01\n" + "\x15ListBackupJobsRequest\x12\x1b\n" + @@ -1374,106 +2706,172 @@ const file_internal_backup_proto_backup_v1_backup_proto_rawDesc = "" + "\x13GetBackupJobRequest\x12\x15\n" + "\x06job_id\x18\x01 \x01(\tR\x05jobId\">\n" + "\x14GetBackupJobResponse\x12&\n" + - "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job2\xb4\x04\n" + + "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job2\x80\f\n" + "\rBackupService\x12=\n" + "\x06Health\x12\x18.backup.v1.HealthRequest\x1a\x19.backup.v1.HealthResponse\x12F\n" + "\tGetConfig\x12\x1b.backup.v1.GetConfigRequest\x1a\x1c.backup.v1.GetConfigResponse\x12O\n" + - "\fUpdateConfig\x12\x1e.backup.v1.UpdateConfigRequest\x1a\x1f.backup.v1.UpdateConfigResponse\x12I\n" + + "\fUpdateConfig\x12\x1e.backup.v1.UpdateConfigRequest\x1a\x1f.backup.v1.UpdateConfigResponse\x12a\n" + + "\x12ListSourceProfiles\x12$.backup.v1.ListSourceProfilesRequest\x1a%.backup.v1.ListSourceProfilesResponse\x12d\n" + + "\x13CreateSourceProfile\x12%.backup.v1.CreateSourceProfileRequest\x1a&.backup.v1.CreateSourceProfileResponse\x12d\n" + + "\x13UpdateSourceProfile\x12%.backup.v1.UpdateSourceProfileRequest\x1a&.backup.v1.UpdateSourceProfileResponse\x12d\n" + + "\x13DeleteSourceProfile\x12%.backup.v1.DeleteSourceProfileRequest\x1a&.backup.v1.DeleteSourceProfileResponse\x12m\n" + + "\x16SetActiveSourceProfile\x12(.backup.v1.SetActiveSourceProfileRequest\x1a).backup.v1.SetActiveSourceProfileResponse\x12I\n" + "\n" + - 
"ValidateS3\x12\x1c.backup.v1.ValidateS3Request\x1a\x1d.backup.v1.ValidateS3Response\x12X\n" + + "ValidateS3\x12\x1c.backup.v1.ValidateS3Request\x1a\x1d.backup.v1.ValidateS3Response\x12U\n" + + "\x0eListS3Profiles\x12 .backup.v1.ListS3ProfilesRequest\x1a!.backup.v1.ListS3ProfilesResponse\x12X\n" + + "\x0fCreateS3Profile\x12!.backup.v1.CreateS3ProfileRequest\x1a\".backup.v1.CreateS3ProfileResponse\x12X\n" + + "\x0fUpdateS3Profile\x12!.backup.v1.UpdateS3ProfileRequest\x1a\".backup.v1.UpdateS3ProfileResponse\x12X\n" + + "\x0fDeleteS3Profile\x12!.backup.v1.DeleteS3ProfileRequest\x1a\".backup.v1.DeleteS3ProfileResponse\x12a\n" + + "\x12SetActiveS3Profile\x12$.backup.v1.SetActiveS3ProfileRequest\x1a%.backup.v1.SetActiveS3ProfileResponse\x12X\n" + "\x0fCreateBackupJob\x12!.backup.v1.CreateBackupJobRequest\x1a\".backup.v1.CreateBackupJobResponse\x12U\n" + "\x0eListBackupJobs\x12 .backup.v1.ListBackupJobsRequest\x1a!.backup.v1.ListBackupJobsResponse\x12O\n" + "\fGetBackupJob\x12\x1e.backup.v1.GetBackupJobRequest\x1a\x1f.backup.v1.GetBackupJobResponseBFZDgithub.com/Wei-Shaw/sub2api/internal/backup/proto/backup/v1;backupv1b\x06proto3" var ( - file_internal_backup_proto_backup_v1_backup_proto_rawDescOnce sync.Once - file_internal_backup_proto_backup_v1_backup_proto_rawDescData []byte + file_proto_backup_v1_backup_proto_rawDescOnce sync.Once + file_proto_backup_v1_backup_proto_rawDescData []byte ) -func file_internal_backup_proto_backup_v1_backup_proto_rawDescGZIP() []byte { - file_internal_backup_proto_backup_v1_backup_proto_rawDescOnce.Do(func() { - file_internal_backup_proto_backup_v1_backup_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_internal_backup_proto_backup_v1_backup_proto_rawDesc), len(file_internal_backup_proto_backup_v1_backup_proto_rawDesc))) +func file_proto_backup_v1_backup_proto_rawDescGZIP() []byte { + file_proto_backup_v1_backup_proto_rawDescOnce.Do(func() { + file_proto_backup_v1_backup_proto_rawDescData = 
protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_backup_v1_backup_proto_rawDesc), len(file_proto_backup_v1_backup_proto_rawDesc))) }) - return file_internal_backup_proto_backup_v1_backup_proto_rawDescData -} - -var file_internal_backup_proto_backup_v1_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 20) -var file_internal_backup_proto_backup_v1_backup_proto_goTypes = []any{ - (*HealthRequest)(nil), // 0: backup.v1.HealthRequest - (*HealthResponse)(nil), // 1: backup.v1.HealthResponse - (*SourceConfig)(nil), // 2: backup.v1.SourceConfig - (*S3Config)(nil), // 3: backup.v1.S3Config - (*BackupConfig)(nil), // 4: backup.v1.BackupConfig - (*GetConfigRequest)(nil), // 5: backup.v1.GetConfigRequest - (*GetConfigResponse)(nil), // 6: backup.v1.GetConfigResponse - (*UpdateConfigRequest)(nil), // 7: backup.v1.UpdateConfigRequest - (*UpdateConfigResponse)(nil), // 8: backup.v1.UpdateConfigResponse - (*ValidateS3Request)(nil), // 9: backup.v1.ValidateS3Request - (*ValidateS3Response)(nil), // 10: backup.v1.ValidateS3Response - (*CreateBackupJobRequest)(nil), // 11: backup.v1.CreateBackupJobRequest - (*BackupArtifact)(nil), // 12: backup.v1.BackupArtifact - (*BackupS3Object)(nil), // 13: backup.v1.BackupS3Object - (*BackupJob)(nil), // 14: backup.v1.BackupJob - (*CreateBackupJobResponse)(nil), // 15: backup.v1.CreateBackupJobResponse - (*ListBackupJobsRequest)(nil), // 16: backup.v1.ListBackupJobsRequest - (*ListBackupJobsResponse)(nil), // 17: backup.v1.ListBackupJobsResponse - (*GetBackupJobRequest)(nil), // 18: backup.v1.GetBackupJobRequest - (*GetBackupJobResponse)(nil), // 19: backup.v1.GetBackupJobResponse -} -var file_internal_backup_proto_backup_v1_backup_proto_depIdxs = []int32{ + return file_proto_backup_v1_backup_proto_rawDescData +} + +var file_proto_backup_v1_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 42) +var file_proto_backup_v1_backup_proto_goTypes = []any{ + (*HealthRequest)(nil), // 0: backup.v1.HealthRequest + 
(*HealthResponse)(nil), // 1: backup.v1.HealthResponse + (*SourceConfig)(nil), // 2: backup.v1.SourceConfig + (*S3Config)(nil), // 3: backup.v1.S3Config + (*BackupConfig)(nil), // 4: backup.v1.BackupConfig + (*GetConfigRequest)(nil), // 5: backup.v1.GetConfigRequest + (*GetConfigResponse)(nil), // 6: backup.v1.GetConfigResponse + (*UpdateConfigRequest)(nil), // 7: backup.v1.UpdateConfigRequest + (*UpdateConfigResponse)(nil), // 8: backup.v1.UpdateConfigResponse + (*SourceProfile)(nil), // 9: backup.v1.SourceProfile + (*ListSourceProfilesRequest)(nil), // 10: backup.v1.ListSourceProfilesRequest + (*ListSourceProfilesResponse)(nil), // 11: backup.v1.ListSourceProfilesResponse + (*CreateSourceProfileRequest)(nil), // 12: backup.v1.CreateSourceProfileRequest + (*CreateSourceProfileResponse)(nil), // 13: backup.v1.CreateSourceProfileResponse + (*UpdateSourceProfileRequest)(nil), // 14: backup.v1.UpdateSourceProfileRequest + (*UpdateSourceProfileResponse)(nil), // 15: backup.v1.UpdateSourceProfileResponse + (*DeleteSourceProfileRequest)(nil), // 16: backup.v1.DeleteSourceProfileRequest + (*DeleteSourceProfileResponse)(nil), // 17: backup.v1.DeleteSourceProfileResponse + (*SetActiveSourceProfileRequest)(nil), // 18: backup.v1.SetActiveSourceProfileRequest + (*SetActiveSourceProfileResponse)(nil), // 19: backup.v1.SetActiveSourceProfileResponse + (*ValidateS3Request)(nil), // 20: backup.v1.ValidateS3Request + (*ValidateS3Response)(nil), // 21: backup.v1.ValidateS3Response + (*S3Profile)(nil), // 22: backup.v1.S3Profile + (*ListS3ProfilesRequest)(nil), // 23: backup.v1.ListS3ProfilesRequest + (*ListS3ProfilesResponse)(nil), // 24: backup.v1.ListS3ProfilesResponse + (*CreateS3ProfileRequest)(nil), // 25: backup.v1.CreateS3ProfileRequest + (*CreateS3ProfileResponse)(nil), // 26: backup.v1.CreateS3ProfileResponse + (*UpdateS3ProfileRequest)(nil), // 27: backup.v1.UpdateS3ProfileRequest + (*UpdateS3ProfileResponse)(nil), // 28: backup.v1.UpdateS3ProfileResponse + 
(*DeleteS3ProfileRequest)(nil), // 29: backup.v1.DeleteS3ProfileRequest + (*DeleteS3ProfileResponse)(nil), // 30: backup.v1.DeleteS3ProfileResponse + (*SetActiveS3ProfileRequest)(nil), // 31: backup.v1.SetActiveS3ProfileRequest + (*SetActiveS3ProfileResponse)(nil), // 32: backup.v1.SetActiveS3ProfileResponse + (*CreateBackupJobRequest)(nil), // 33: backup.v1.CreateBackupJobRequest + (*BackupArtifact)(nil), // 34: backup.v1.BackupArtifact + (*BackupS3Object)(nil), // 35: backup.v1.BackupS3Object + (*BackupJob)(nil), // 36: backup.v1.BackupJob + (*CreateBackupJobResponse)(nil), // 37: backup.v1.CreateBackupJobResponse + (*ListBackupJobsRequest)(nil), // 38: backup.v1.ListBackupJobsRequest + (*ListBackupJobsResponse)(nil), // 39: backup.v1.ListBackupJobsResponse + (*GetBackupJobRequest)(nil), // 40: backup.v1.GetBackupJobRequest + (*GetBackupJobResponse)(nil), // 41: backup.v1.GetBackupJobResponse +} +var file_proto_backup_v1_backup_proto_depIdxs = []int32{ 2, // 0: backup.v1.BackupConfig.postgres:type_name -> backup.v1.SourceConfig 2, // 1: backup.v1.BackupConfig.redis:type_name -> backup.v1.SourceConfig 3, // 2: backup.v1.BackupConfig.s3:type_name -> backup.v1.S3Config 4, // 3: backup.v1.GetConfigResponse.config:type_name -> backup.v1.BackupConfig 4, // 4: backup.v1.UpdateConfigRequest.config:type_name -> backup.v1.BackupConfig 4, // 5: backup.v1.UpdateConfigResponse.config:type_name -> backup.v1.BackupConfig - 3, // 6: backup.v1.ValidateS3Request.s3:type_name -> backup.v1.S3Config - 12, // 7: backup.v1.BackupJob.artifact:type_name -> backup.v1.BackupArtifact - 13, // 8: backup.v1.BackupJob.s3_object:type_name -> backup.v1.BackupS3Object - 14, // 9: backup.v1.CreateBackupJobResponse.job:type_name -> backup.v1.BackupJob - 14, // 10: backup.v1.ListBackupJobsResponse.items:type_name -> backup.v1.BackupJob - 14, // 11: backup.v1.GetBackupJobResponse.job:type_name -> backup.v1.BackupJob - 0, // 12: backup.v1.BackupService.Health:input_type -> backup.v1.HealthRequest - 5, 
// 13: backup.v1.BackupService.GetConfig:input_type -> backup.v1.GetConfigRequest - 7, // 14: backup.v1.BackupService.UpdateConfig:input_type -> backup.v1.UpdateConfigRequest - 9, // 15: backup.v1.BackupService.ValidateS3:input_type -> backup.v1.ValidateS3Request - 11, // 16: backup.v1.BackupService.CreateBackupJob:input_type -> backup.v1.CreateBackupJobRequest - 16, // 17: backup.v1.BackupService.ListBackupJobs:input_type -> backup.v1.ListBackupJobsRequest - 18, // 18: backup.v1.BackupService.GetBackupJob:input_type -> backup.v1.GetBackupJobRequest - 1, // 19: backup.v1.BackupService.Health:output_type -> backup.v1.HealthResponse - 6, // 20: backup.v1.BackupService.GetConfig:output_type -> backup.v1.GetConfigResponse - 8, // 21: backup.v1.BackupService.UpdateConfig:output_type -> backup.v1.UpdateConfigResponse - 10, // 22: backup.v1.BackupService.ValidateS3:output_type -> backup.v1.ValidateS3Response - 15, // 23: backup.v1.BackupService.CreateBackupJob:output_type -> backup.v1.CreateBackupJobResponse - 17, // 24: backup.v1.BackupService.ListBackupJobs:output_type -> backup.v1.ListBackupJobsResponse - 19, // 25: backup.v1.BackupService.GetBackupJob:output_type -> backup.v1.GetBackupJobResponse - 19, // [19:26] is the sub-list for method output_type - 12, // [12:19] is the sub-list for method input_type - 12, // [12:12] is the sub-list for extension type_name - 12, // [12:12] is the sub-list for extension extendee - 0, // [0:12] is the sub-list for field type_name -} - -func init() { file_internal_backup_proto_backup_v1_backup_proto_init() } -func file_internal_backup_proto_backup_v1_backup_proto_init() { - if File_internal_backup_proto_backup_v1_backup_proto != nil { + 2, // 6: backup.v1.SourceProfile.config:type_name -> backup.v1.SourceConfig + 9, // 7: backup.v1.ListSourceProfilesResponse.items:type_name -> backup.v1.SourceProfile + 2, // 8: backup.v1.CreateSourceProfileRequest.config:type_name -> backup.v1.SourceConfig + 9, // 9: 
backup.v1.CreateSourceProfileResponse.profile:type_name -> backup.v1.SourceProfile + 2, // 10: backup.v1.UpdateSourceProfileRequest.config:type_name -> backup.v1.SourceConfig + 9, // 11: backup.v1.UpdateSourceProfileResponse.profile:type_name -> backup.v1.SourceProfile + 9, // 12: backup.v1.SetActiveSourceProfileResponse.profile:type_name -> backup.v1.SourceProfile + 3, // 13: backup.v1.ValidateS3Request.s3:type_name -> backup.v1.S3Config + 3, // 14: backup.v1.S3Profile.s3:type_name -> backup.v1.S3Config + 22, // 15: backup.v1.ListS3ProfilesResponse.items:type_name -> backup.v1.S3Profile + 3, // 16: backup.v1.CreateS3ProfileRequest.s3:type_name -> backup.v1.S3Config + 22, // 17: backup.v1.CreateS3ProfileResponse.profile:type_name -> backup.v1.S3Profile + 3, // 18: backup.v1.UpdateS3ProfileRequest.s3:type_name -> backup.v1.S3Config + 22, // 19: backup.v1.UpdateS3ProfileResponse.profile:type_name -> backup.v1.S3Profile + 22, // 20: backup.v1.SetActiveS3ProfileResponse.profile:type_name -> backup.v1.S3Profile + 34, // 21: backup.v1.BackupJob.artifact:type_name -> backup.v1.BackupArtifact + 35, // 22: backup.v1.BackupJob.s3_object:type_name -> backup.v1.BackupS3Object + 36, // 23: backup.v1.CreateBackupJobResponse.job:type_name -> backup.v1.BackupJob + 36, // 24: backup.v1.ListBackupJobsResponse.items:type_name -> backup.v1.BackupJob + 36, // 25: backup.v1.GetBackupJobResponse.job:type_name -> backup.v1.BackupJob + 0, // 26: backup.v1.BackupService.Health:input_type -> backup.v1.HealthRequest + 5, // 27: backup.v1.BackupService.GetConfig:input_type -> backup.v1.GetConfigRequest + 7, // 28: backup.v1.BackupService.UpdateConfig:input_type -> backup.v1.UpdateConfigRequest + 10, // 29: backup.v1.BackupService.ListSourceProfiles:input_type -> backup.v1.ListSourceProfilesRequest + 12, // 30: backup.v1.BackupService.CreateSourceProfile:input_type -> backup.v1.CreateSourceProfileRequest + 14, // 31: backup.v1.BackupService.UpdateSourceProfile:input_type -> 
backup.v1.UpdateSourceProfileRequest + 16, // 32: backup.v1.BackupService.DeleteSourceProfile:input_type -> backup.v1.DeleteSourceProfileRequest + 18, // 33: backup.v1.BackupService.SetActiveSourceProfile:input_type -> backup.v1.SetActiveSourceProfileRequest + 20, // 34: backup.v1.BackupService.ValidateS3:input_type -> backup.v1.ValidateS3Request + 23, // 35: backup.v1.BackupService.ListS3Profiles:input_type -> backup.v1.ListS3ProfilesRequest + 25, // 36: backup.v1.BackupService.CreateS3Profile:input_type -> backup.v1.CreateS3ProfileRequest + 27, // 37: backup.v1.BackupService.UpdateS3Profile:input_type -> backup.v1.UpdateS3ProfileRequest + 29, // 38: backup.v1.BackupService.DeleteS3Profile:input_type -> backup.v1.DeleteS3ProfileRequest + 31, // 39: backup.v1.BackupService.SetActiveS3Profile:input_type -> backup.v1.SetActiveS3ProfileRequest + 33, // 40: backup.v1.BackupService.CreateBackupJob:input_type -> backup.v1.CreateBackupJobRequest + 38, // 41: backup.v1.BackupService.ListBackupJobs:input_type -> backup.v1.ListBackupJobsRequest + 40, // 42: backup.v1.BackupService.GetBackupJob:input_type -> backup.v1.GetBackupJobRequest + 1, // 43: backup.v1.BackupService.Health:output_type -> backup.v1.HealthResponse + 6, // 44: backup.v1.BackupService.GetConfig:output_type -> backup.v1.GetConfigResponse + 8, // 45: backup.v1.BackupService.UpdateConfig:output_type -> backup.v1.UpdateConfigResponse + 11, // 46: backup.v1.BackupService.ListSourceProfiles:output_type -> backup.v1.ListSourceProfilesResponse + 13, // 47: backup.v1.BackupService.CreateSourceProfile:output_type -> backup.v1.CreateSourceProfileResponse + 15, // 48: backup.v1.BackupService.UpdateSourceProfile:output_type -> backup.v1.UpdateSourceProfileResponse + 17, // 49: backup.v1.BackupService.DeleteSourceProfile:output_type -> backup.v1.DeleteSourceProfileResponse + 19, // 50: backup.v1.BackupService.SetActiveSourceProfile:output_type -> backup.v1.SetActiveSourceProfileResponse + 21, // 51: 
backup.v1.BackupService.ValidateS3:output_type -> backup.v1.ValidateS3Response + 24, // 52: backup.v1.BackupService.ListS3Profiles:output_type -> backup.v1.ListS3ProfilesResponse + 26, // 53: backup.v1.BackupService.CreateS3Profile:output_type -> backup.v1.CreateS3ProfileResponse + 28, // 54: backup.v1.BackupService.UpdateS3Profile:output_type -> backup.v1.UpdateS3ProfileResponse + 30, // 55: backup.v1.BackupService.DeleteS3Profile:output_type -> backup.v1.DeleteS3ProfileResponse + 32, // 56: backup.v1.BackupService.SetActiveS3Profile:output_type -> backup.v1.SetActiveS3ProfileResponse + 37, // 57: backup.v1.BackupService.CreateBackupJob:output_type -> backup.v1.CreateBackupJobResponse + 39, // 58: backup.v1.BackupService.ListBackupJobs:output_type -> backup.v1.ListBackupJobsResponse + 41, // 59: backup.v1.BackupService.GetBackupJob:output_type -> backup.v1.GetBackupJobResponse + 43, // [43:60] is the sub-list for method output_type + 26, // [26:43] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name +} + +func init() { file_proto_backup_v1_backup_proto_init() } +func file_proto_backup_v1_backup_proto_init() { + if File_proto_backup_v1_backup_proto != nil { return } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_internal_backup_proto_backup_v1_backup_proto_rawDesc), len(file_internal_backup_proto_backup_v1_backup_proto_rawDesc)), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_backup_v1_backup_proto_rawDesc), len(file_proto_backup_v1_backup_proto_rawDesc)), NumEnums: 0, - NumMessages: 20, + NumMessages: 42, NumExtensions: 0, NumServices: 1, }, - GoTypes: file_internal_backup_proto_backup_v1_backup_proto_goTypes, - DependencyIndexes: 
file_internal_backup_proto_backup_v1_backup_proto_depIdxs, - MessageInfos: file_internal_backup_proto_backup_v1_backup_proto_msgTypes, + GoTypes: file_proto_backup_v1_backup_proto_goTypes, + DependencyIndexes: file_proto_backup_v1_backup_proto_depIdxs, + MessageInfos: file_proto_backup_v1_backup_proto_msgTypes, }.Build() - File_internal_backup_proto_backup_v1_backup_proto = out.File - file_internal_backup_proto_backup_v1_backup_proto_goTypes = nil - file_internal_backup_proto_backup_v1_backup_proto_depIdxs = nil + File_proto_backup_v1_backup_proto = out.File + file_proto_backup_v1_backup_proto_goTypes = nil + file_proto_backup_v1_backup_proto_depIdxs = nil } diff --git a/backend/internal/backup/proto/backup/v1/backup.proto b/backend/internal/backup/proto/backup/v1/backup.proto index 38e015742..296a05148 100644 --- a/backend/internal/backup/proto/backup/v1/backup.proto +++ b/backend/internal/backup/proto/backup/v1/backup.proto @@ -8,7 +8,17 @@ service BackupService { rpc Health(HealthRequest) returns (HealthResponse); rpc GetConfig(GetConfigRequest) returns (GetConfigResponse); rpc UpdateConfig(UpdateConfigRequest) returns (UpdateConfigResponse); + rpc ListSourceProfiles(ListSourceProfilesRequest) returns (ListSourceProfilesResponse); + rpc CreateSourceProfile(CreateSourceProfileRequest) returns (CreateSourceProfileResponse); + rpc UpdateSourceProfile(UpdateSourceProfileRequest) returns (UpdateSourceProfileResponse); + rpc DeleteSourceProfile(DeleteSourceProfileRequest) returns (DeleteSourceProfileResponse); + rpc SetActiveSourceProfile(SetActiveSourceProfileRequest) returns (SetActiveSourceProfileResponse); rpc ValidateS3(ValidateS3Request) returns (ValidateS3Response); + rpc ListS3Profiles(ListS3ProfilesRequest) returns (ListS3ProfilesResponse); + rpc CreateS3Profile(CreateS3ProfileRequest) returns (CreateS3ProfileResponse); + rpc UpdateS3Profile(UpdateS3ProfileRequest) returns (UpdateS3ProfileResponse); + rpc DeleteS3Profile(DeleteS3ProfileRequest) returns 
(DeleteS3ProfileResponse); + rpc SetActiveS3Profile(SetActiveS3ProfileRequest) returns (SetActiveS3ProfileResponse); rpc CreateBackupJob(CreateBackupJobRequest) returns (CreateBackupJobResponse); rpc ListBackupJobs(ListBackupJobsRequest) returns (ListBackupJobsResponse); rpc GetBackupJob(GetBackupJobRequest) returns (GetBackupJobResponse); @@ -56,6 +66,9 @@ message BackupConfig { SourceConfig postgres = 6; SourceConfig redis = 7; S3Config s3 = 8; + string active_s3_profile_id = 9; + string active_postgres_profile_id = 10; + string active_redis_profile_id = 11; } message GetConfigRequest {} @@ -72,6 +85,64 @@ message UpdateConfigResponse { BackupConfig config = 1; } +message SourceProfile { + string source_type = 1; + string profile_id = 2; + string name = 3; + bool is_active = 4; + SourceConfig config = 5; + bool password_configured = 6; + string created_at = 7; + string updated_at = 8; +} + +message ListSourceProfilesRequest { + string source_type = 1; +} + +message ListSourceProfilesResponse { + repeated SourceProfile items = 1; +} + +message CreateSourceProfileRequest { + string source_type = 1; + string profile_id = 2; + string name = 3; + SourceConfig config = 4; + bool set_active = 5; +} + +message CreateSourceProfileResponse { + SourceProfile profile = 1; +} + +message UpdateSourceProfileRequest { + string source_type = 1; + string profile_id = 2; + string name = 3; + SourceConfig config = 4; +} + +message UpdateSourceProfileResponse { + SourceProfile profile = 1; +} + +message DeleteSourceProfileRequest { + string source_type = 1; + string profile_id = 2; +} + +message DeleteSourceProfileResponse {} + +message SetActiveSourceProfileRequest { + string source_type = 1; + string profile_id = 2; +} + +message SetActiveSourceProfileResponse { + SourceProfile profile = 1; +} + message ValidateS3Request { S3Config s3 = 1; } @@ -81,11 +152,65 @@ message ValidateS3Response { string message = 2; } +message S3Profile { + string profile_id = 1; + string name = 2; + 
bool is_active = 3; + S3Config s3 = 4; + bool secret_access_key_configured = 5; + string created_at = 6; + string updated_at = 7; +} + +message ListS3ProfilesRequest {} + +message ListS3ProfilesResponse { + repeated S3Profile items = 1; +} + +message CreateS3ProfileRequest { + string profile_id = 1; + string name = 2; + S3Config s3 = 3; + bool set_active = 4; +} + +message CreateS3ProfileResponse { + S3Profile profile = 1; +} + +message UpdateS3ProfileRequest { + string profile_id = 1; + string name = 2; + S3Config s3 = 3; +} + +message UpdateS3ProfileResponse { + S3Profile profile = 1; +} + +message DeleteS3ProfileRequest { + string profile_id = 1; +} + +message DeleteS3ProfileResponse {} + +message SetActiveS3ProfileRequest { + string profile_id = 1; +} + +message SetActiveS3ProfileResponse { + S3Profile profile = 1; +} + message CreateBackupJobRequest { string backup_type = 1; bool upload_to_s3 = 2; string triggered_by = 3; string idempotency_key = 4; + string s3_profile_id = 5; + string postgres_profile_id = 6; + string redis_profile_id = 7; } message BackupArtifact { @@ -112,6 +237,9 @@ message BackupJob { string error_message = 9; BackupArtifact artifact = 10; BackupS3Object s3_object = 11; + string s3_profile_id = 12; + string postgres_profile_id = 13; + string redis_profile_id = 14; } message CreateBackupJobResponse { diff --git a/backend/internal/backup/proto/backup/v1/backup_grpc.pb.go b/backend/internal/backup/proto/backup/v1/backup_grpc.pb.go index cf86ae813..c4731e1a1 100644 --- a/backend/internal/backup/proto/backup/v1/backup_grpc.pb.go +++ b/backend/internal/backup/proto/backup/v1/backup_grpc.pb.go @@ -2,7 +2,7 @@ // versions: // - protoc-gen-go-grpc v1.6.0 // - protoc v6.32.1 -// source: internal/backup/proto/backup/v1/backup.proto +// source: proto/backup/v1/backup.proto package backupv1 @@ -19,13 +19,23 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - BackupService_Health_FullMethodName = "/backup.v1.BackupService/Health" - 
BackupService_GetConfig_FullMethodName = "/backup.v1.BackupService/GetConfig" - BackupService_UpdateConfig_FullMethodName = "/backup.v1.BackupService/UpdateConfig" - BackupService_ValidateS3_FullMethodName = "/backup.v1.BackupService/ValidateS3" - BackupService_CreateBackupJob_FullMethodName = "/backup.v1.BackupService/CreateBackupJob" - BackupService_ListBackupJobs_FullMethodName = "/backup.v1.BackupService/ListBackupJobs" - BackupService_GetBackupJob_FullMethodName = "/backup.v1.BackupService/GetBackupJob" + BackupService_Health_FullMethodName = "/backup.v1.BackupService/Health" + BackupService_GetConfig_FullMethodName = "/backup.v1.BackupService/GetConfig" + BackupService_UpdateConfig_FullMethodName = "/backup.v1.BackupService/UpdateConfig" + BackupService_ListSourceProfiles_FullMethodName = "/backup.v1.BackupService/ListSourceProfiles" + BackupService_CreateSourceProfile_FullMethodName = "/backup.v1.BackupService/CreateSourceProfile" + BackupService_UpdateSourceProfile_FullMethodName = "/backup.v1.BackupService/UpdateSourceProfile" + BackupService_DeleteSourceProfile_FullMethodName = "/backup.v1.BackupService/DeleteSourceProfile" + BackupService_SetActiveSourceProfile_FullMethodName = "/backup.v1.BackupService/SetActiveSourceProfile" + BackupService_ValidateS3_FullMethodName = "/backup.v1.BackupService/ValidateS3" + BackupService_ListS3Profiles_FullMethodName = "/backup.v1.BackupService/ListS3Profiles" + BackupService_CreateS3Profile_FullMethodName = "/backup.v1.BackupService/CreateS3Profile" + BackupService_UpdateS3Profile_FullMethodName = "/backup.v1.BackupService/UpdateS3Profile" + BackupService_DeleteS3Profile_FullMethodName = "/backup.v1.BackupService/DeleteS3Profile" + BackupService_SetActiveS3Profile_FullMethodName = "/backup.v1.BackupService/SetActiveS3Profile" + BackupService_CreateBackupJob_FullMethodName = "/backup.v1.BackupService/CreateBackupJob" + BackupService_ListBackupJobs_FullMethodName = "/backup.v1.BackupService/ListBackupJobs" + 
BackupService_GetBackupJob_FullMethodName = "/backup.v1.BackupService/GetBackupJob" ) // BackupServiceClient is the client API for BackupService service. @@ -35,7 +45,17 @@ type BackupServiceClient interface { Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) + ListSourceProfiles(ctx context.Context, in *ListSourceProfilesRequest, opts ...grpc.CallOption) (*ListSourceProfilesResponse, error) + CreateSourceProfile(ctx context.Context, in *CreateSourceProfileRequest, opts ...grpc.CallOption) (*CreateSourceProfileResponse, error) + UpdateSourceProfile(ctx context.Context, in *UpdateSourceProfileRequest, opts ...grpc.CallOption) (*UpdateSourceProfileResponse, error) + DeleteSourceProfile(ctx context.Context, in *DeleteSourceProfileRequest, opts ...grpc.CallOption) (*DeleteSourceProfileResponse, error) + SetActiveSourceProfile(ctx context.Context, in *SetActiveSourceProfileRequest, opts ...grpc.CallOption) (*SetActiveSourceProfileResponse, error) ValidateS3(ctx context.Context, in *ValidateS3Request, opts ...grpc.CallOption) (*ValidateS3Response, error) + ListS3Profiles(ctx context.Context, in *ListS3ProfilesRequest, opts ...grpc.CallOption) (*ListS3ProfilesResponse, error) + CreateS3Profile(ctx context.Context, in *CreateS3ProfileRequest, opts ...grpc.CallOption) (*CreateS3ProfileResponse, error) + UpdateS3Profile(ctx context.Context, in *UpdateS3ProfileRequest, opts ...grpc.CallOption) (*UpdateS3ProfileResponse, error) + DeleteS3Profile(ctx context.Context, in *DeleteS3ProfileRequest, opts ...grpc.CallOption) (*DeleteS3ProfileResponse, error) + SetActiveS3Profile(ctx context.Context, in *SetActiveS3ProfileRequest, opts ...grpc.CallOption) (*SetActiveS3ProfileResponse, error) CreateBackupJob(ctx 
context.Context, in *CreateBackupJobRequest, opts ...grpc.CallOption) (*CreateBackupJobResponse, error) ListBackupJobs(ctx context.Context, in *ListBackupJobsRequest, opts ...grpc.CallOption) (*ListBackupJobsResponse, error) GetBackupJob(ctx context.Context, in *GetBackupJobRequest, opts ...grpc.CallOption) (*GetBackupJobResponse, error) @@ -79,6 +99,56 @@ func (c *backupServiceClient) UpdateConfig(ctx context.Context, in *UpdateConfig return out, nil } +func (c *backupServiceClient) ListSourceProfiles(ctx context.Context, in *ListSourceProfilesRequest, opts ...grpc.CallOption) (*ListSourceProfilesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListSourceProfilesResponse) + err := c.cc.Invoke(ctx, BackupService_ListSourceProfiles_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) CreateSourceProfile(ctx context.Context, in *CreateSourceProfileRequest, opts ...grpc.CallOption) (*CreateSourceProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateSourceProfileResponse) + err := c.cc.Invoke(ctx, BackupService_CreateSourceProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) UpdateSourceProfile(ctx context.Context, in *UpdateSourceProfileRequest, opts ...grpc.CallOption) (*UpdateSourceProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UpdateSourceProfileResponse) + err := c.cc.Invoke(ctx, BackupService_UpdateSourceProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) DeleteSourceProfile(ctx context.Context, in *DeleteSourceProfileRequest, opts ...grpc.CallOption) (*DeleteSourceProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(DeleteSourceProfileResponse) + err := c.cc.Invoke(ctx, BackupService_DeleteSourceProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) SetActiveSourceProfile(ctx context.Context, in *SetActiveSourceProfileRequest, opts ...grpc.CallOption) (*SetActiveSourceProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SetActiveSourceProfileResponse) + err := c.cc.Invoke(ctx, BackupService_SetActiveSourceProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *backupServiceClient) ValidateS3(ctx context.Context, in *ValidateS3Request, opts ...grpc.CallOption) (*ValidateS3Response, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ValidateS3Response) @@ -89,6 +159,56 @@ func (c *backupServiceClient) ValidateS3(ctx context.Context, in *ValidateS3Requ return out, nil } +func (c *backupServiceClient) ListS3Profiles(ctx context.Context, in *ListS3ProfilesRequest, opts ...grpc.CallOption) (*ListS3ProfilesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListS3ProfilesResponse) + err := c.cc.Invoke(ctx, BackupService_ListS3Profiles_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) CreateS3Profile(ctx context.Context, in *CreateS3ProfileRequest, opts ...grpc.CallOption) (*CreateS3ProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateS3ProfileResponse) + err := c.cc.Invoke(ctx, BackupService_CreateS3Profile_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) UpdateS3Profile(ctx context.Context, in *UpdateS3ProfileRequest, opts ...grpc.CallOption) (*UpdateS3ProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UpdateS3ProfileResponse) + err := c.cc.Invoke(ctx, BackupService_UpdateS3Profile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) DeleteS3Profile(ctx context.Context, in *DeleteS3ProfileRequest, opts ...grpc.CallOption) (*DeleteS3ProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(DeleteS3ProfileResponse) + err := c.cc.Invoke(ctx, BackupService_DeleteS3Profile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) SetActiveS3Profile(ctx context.Context, in *SetActiveS3ProfileRequest, opts ...grpc.CallOption) (*SetActiveS3ProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SetActiveS3ProfileResponse) + err := c.cc.Invoke(ctx, BackupService_SetActiveS3Profile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *backupServiceClient) CreateBackupJob(ctx context.Context, in *CreateBackupJobRequest, opts ...grpc.CallOption) (*CreateBackupJobResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
out := new(CreateBackupJobResponse) @@ -126,7 +246,17 @@ type BackupServiceServer interface { Health(context.Context, *HealthRequest) (*HealthResponse, error) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) + ListSourceProfiles(context.Context, *ListSourceProfilesRequest) (*ListSourceProfilesResponse, error) + CreateSourceProfile(context.Context, *CreateSourceProfileRequest) (*CreateSourceProfileResponse, error) + UpdateSourceProfile(context.Context, *UpdateSourceProfileRequest) (*UpdateSourceProfileResponse, error) + DeleteSourceProfile(context.Context, *DeleteSourceProfileRequest) (*DeleteSourceProfileResponse, error) + SetActiveSourceProfile(context.Context, *SetActiveSourceProfileRequest) (*SetActiveSourceProfileResponse, error) ValidateS3(context.Context, *ValidateS3Request) (*ValidateS3Response, error) + ListS3Profiles(context.Context, *ListS3ProfilesRequest) (*ListS3ProfilesResponse, error) + CreateS3Profile(context.Context, *CreateS3ProfileRequest) (*CreateS3ProfileResponse, error) + UpdateS3Profile(context.Context, *UpdateS3ProfileRequest) (*UpdateS3ProfileResponse, error) + DeleteS3Profile(context.Context, *DeleteS3ProfileRequest) (*DeleteS3ProfileResponse, error) + SetActiveS3Profile(context.Context, *SetActiveS3ProfileRequest) (*SetActiveS3ProfileResponse, error) CreateBackupJob(context.Context, *CreateBackupJobRequest) (*CreateBackupJobResponse, error) ListBackupJobs(context.Context, *ListBackupJobsRequest) (*ListBackupJobsResponse, error) GetBackupJob(context.Context, *GetBackupJobRequest) (*GetBackupJobResponse, error) @@ -149,9 +279,39 @@ func (UnimplementedBackupServiceServer) GetConfig(context.Context, *GetConfigReq func (UnimplementedBackupServiceServer) UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) { return nil, status.Error(codes.Unimplemented, "method UpdateConfig not implemented") } +func 
(UnimplementedBackupServiceServer) ListSourceProfiles(context.Context, *ListSourceProfilesRequest) (*ListSourceProfilesResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ListSourceProfiles not implemented") +} +func (UnimplementedBackupServiceServer) CreateSourceProfile(context.Context, *CreateSourceProfileRequest) (*CreateSourceProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method CreateSourceProfile not implemented") +} +func (UnimplementedBackupServiceServer) UpdateSourceProfile(context.Context, *UpdateSourceProfileRequest) (*UpdateSourceProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method UpdateSourceProfile not implemented") +} +func (UnimplementedBackupServiceServer) DeleteSourceProfile(context.Context, *DeleteSourceProfileRequest) (*DeleteSourceProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method DeleteSourceProfile not implemented") +} +func (UnimplementedBackupServiceServer) SetActiveSourceProfile(context.Context, *SetActiveSourceProfileRequest) (*SetActiveSourceProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method SetActiveSourceProfile not implemented") +} func (UnimplementedBackupServiceServer) ValidateS3(context.Context, *ValidateS3Request) (*ValidateS3Response, error) { return nil, status.Error(codes.Unimplemented, "method ValidateS3 not implemented") } +func (UnimplementedBackupServiceServer) ListS3Profiles(context.Context, *ListS3ProfilesRequest) (*ListS3ProfilesResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ListS3Profiles not implemented") +} +func (UnimplementedBackupServiceServer) CreateS3Profile(context.Context, *CreateS3ProfileRequest) (*CreateS3ProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method CreateS3Profile not implemented") +} +func (UnimplementedBackupServiceServer) UpdateS3Profile(context.Context, *UpdateS3ProfileRequest) 
(*UpdateS3ProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method UpdateS3Profile not implemented") +} +func (UnimplementedBackupServiceServer) DeleteS3Profile(context.Context, *DeleteS3ProfileRequest) (*DeleteS3ProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method DeleteS3Profile not implemented") +} +func (UnimplementedBackupServiceServer) SetActiveS3Profile(context.Context, *SetActiveS3ProfileRequest) (*SetActiveS3ProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method SetActiveS3Profile not implemented") +} func (UnimplementedBackupServiceServer) CreateBackupJob(context.Context, *CreateBackupJobRequest) (*CreateBackupJobResponse, error) { return nil, status.Error(codes.Unimplemented, "method CreateBackupJob not implemented") } @@ -236,6 +396,96 @@ func _BackupService_UpdateConfig_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _BackupService_ListSourceProfiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSourceProfilesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).ListSourceProfiles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_ListSourceProfiles_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).ListSourceProfiles(ctx, req.(*ListSourceProfilesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_CreateSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSourceProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(BackupServiceServer).CreateSourceProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_CreateSourceProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).CreateSourceProfile(ctx, req.(*CreateSourceProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_UpdateSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSourceProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).UpdateSourceProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_UpdateSourceProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).UpdateSourceProfile(ctx, req.(*UpdateSourceProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_DeleteSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSourceProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).DeleteSourceProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_DeleteSourceProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).DeleteSourceProfile(ctx, req.(*DeleteSourceProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_SetActiveSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) 
(interface{}, error) { + in := new(SetActiveSourceProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).SetActiveSourceProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_SetActiveSourceProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).SetActiveSourceProfile(ctx, req.(*SetActiveSourceProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _BackupService_ValidateS3_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ValidateS3Request) if err := dec(in); err != nil { @@ -254,6 +504,96 @@ func _BackupService_ValidateS3_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _BackupService_ListS3Profiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListS3ProfilesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).ListS3Profiles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_ListS3Profiles_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).ListS3Profiles(ctx, req.(*ListS3ProfilesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_CreateS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateS3ProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).CreateS3Profile(ctx, in) + } + info 
:= &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_CreateS3Profile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).CreateS3Profile(ctx, req.(*CreateS3ProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_UpdateS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateS3ProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).UpdateS3Profile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_UpdateS3Profile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).UpdateS3Profile(ctx, req.(*UpdateS3ProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_DeleteS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteS3ProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).DeleteS3Profile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_DeleteS3Profile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).DeleteS3Profile(ctx, req.(*DeleteS3ProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_SetActiveS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetActiveS3ProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == 
nil { + return srv.(BackupServiceServer).SetActiveS3Profile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_SetActiveS3Profile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).SetActiveS3Profile(ctx, req.(*SetActiveS3ProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _BackupService_CreateBackupJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateBackupJobRequest) if err := dec(in); err != nil { @@ -327,10 +667,50 @@ var BackupService_ServiceDesc = grpc.ServiceDesc{ MethodName: "UpdateConfig", Handler: _BackupService_UpdateConfig_Handler, }, + { + MethodName: "ListSourceProfiles", + Handler: _BackupService_ListSourceProfiles_Handler, + }, + { + MethodName: "CreateSourceProfile", + Handler: _BackupService_CreateSourceProfile_Handler, + }, + { + MethodName: "UpdateSourceProfile", + Handler: _BackupService_UpdateSourceProfile_Handler, + }, + { + MethodName: "DeleteSourceProfile", + Handler: _BackupService_DeleteSourceProfile_Handler, + }, + { + MethodName: "SetActiveSourceProfile", + Handler: _BackupService_SetActiveSourceProfile_Handler, + }, { MethodName: "ValidateS3", Handler: _BackupService_ValidateS3_Handler, }, + { + MethodName: "ListS3Profiles", + Handler: _BackupService_ListS3Profiles_Handler, + }, + { + MethodName: "CreateS3Profile", + Handler: _BackupService_CreateS3Profile_Handler, + }, + { + MethodName: "UpdateS3Profile", + Handler: _BackupService_UpdateS3Profile_Handler, + }, + { + MethodName: "DeleteS3Profile", + Handler: _BackupService_DeleteS3Profile_Handler, + }, + { + MethodName: "SetActiveS3Profile", + Handler: _BackupService_SetActiveS3Profile_Handler, + }, { MethodName: "CreateBackupJob", Handler: _BackupService_CreateBackupJob_Handler, @@ -345,5 +725,5 @@ var BackupService_ServiceDesc = 
grpc.ServiceDesc{ }, }, Streams: []grpc.StreamDesc{}, - Metadata: "internal/backup/proto/backup/v1/backup.proto", + Metadata: "proto/backup/v1/backup.proto", } diff --git a/backend/internal/handler/admin/data_management_handler.go b/backend/internal/handler/admin/data_management_handler.go index de9545093..4245fcd91 100644 --- a/backend/internal/handler/admin/data_management_handler.go +++ b/backend/internal/handler/admin/data_management_handler.go @@ -34,9 +34,52 @@ type TestS3ConnectionRequest struct { type CreateBackupJobRequest struct { BackupType string `json:"backup_type" binding:"required,oneof=postgres redis full"` UploadToS3 bool `json:"upload_to_s3"` + S3ProfileID string `json:"s3_profile_id"` + PostgresID string `json:"postgres_profile_id"` + RedisID string `json:"redis_profile_id"` IdempotencyKey string `json:"idempotency_key"` } +type CreateSourceProfileRequest struct { + ProfileID string `json:"profile_id" binding:"required"` + Name string `json:"name" binding:"required"` + Config service.DataManagementSourceConfig `json:"config" binding:"required"` + SetActive bool `json:"set_active"` +} + +type UpdateSourceProfileRequest struct { + Name string `json:"name" binding:"required"` + Config service.DataManagementSourceConfig `json:"config" binding:"required"` +} + +type CreateS3ProfileRequest struct { + ProfileID string `json:"profile_id" binding:"required"` + Name string `json:"name" binding:"required"` + Enabled bool `json:"enabled"` + Endpoint string `json:"endpoint"` + Region string `json:"region"` + Bucket string `json:"bucket"` + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + Prefix string `json:"prefix"` + ForcePathStyle bool `json:"force_path_style"` + UseSSL bool `json:"use_ssl"` + SetActive bool `json:"set_active"` +} + +type UpdateS3ProfileRequest struct { + Name string `json:"name" binding:"required"` + Enabled bool `json:"enabled"` + Endpoint string `json:"endpoint"` + Region string 
`json:"region"` + Bucket string `json:"bucket"` + AccessKeyID string `json:"access_key_id"` + SecretAccessKey string `json:"secret_access_key"` + Prefix string `json:"prefix"` + ForcePathStyle bool `json:"force_path_style"` + UseSSL bool `json:"use_ssl"` +} + func (h *DataManagementHandler) GetAgentHealth(c *gin.Context) { health := h.getAgentHealth(c) payload := gin.H{ @@ -131,6 +174,9 @@ func (h *DataManagementHandler) CreateBackupJob(c *gin.Context) { job, err := h.dataManagementService.CreateBackupJob(c.Request.Context(), service.DataManagementCreateBackupJobInput{ BackupType: req.BackupType, UploadToS3: req.UploadToS3, + S3ProfileID: req.S3ProfileID, + PostgresID: req.PostgresID, + RedisID: req.RedisID, TriggeredBy: triggeredBy, IdempotencyKey: req.IdempotencyKey, }) @@ -141,6 +187,258 @@ func (h *DataManagementHandler) CreateBackupJob(c *gin.Context) { response.Success(c, gin.H{"job_id": job.JobID, "status": job.Status}) } +func (h *DataManagementHandler) ListSourceProfiles(c *gin.Context) { + sourceType := strings.TrimSpace(c.Param("source_type")) + if sourceType == "" { + response.BadRequest(c, "Invalid source_type") + return + } + if sourceType != "postgres" && sourceType != "redis" { + response.BadRequest(c, "source_type must be postgres or redis") + return + } + + if !h.requireAgentEnabled(c) { + return + } + items, err := h.dataManagementService.ListSourceProfiles(c.Request.Context(), sourceType) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, gin.H{"items": items}) +} + +func (h *DataManagementHandler) CreateSourceProfile(c *gin.Context) { + sourceType := strings.TrimSpace(c.Param("source_type")) + if sourceType != "postgres" && sourceType != "redis" { + response.BadRequest(c, "source_type must be postgres or redis") + return + } + + var req CreateSourceProfileRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if !h.requireAgentEnabled(c) 
{ + return + } + profile, err := h.dataManagementService.CreateSourceProfile(c.Request.Context(), service.DataManagementCreateSourceProfileInput{ + SourceType: sourceType, + ProfileID: req.ProfileID, + Name: req.Name, + Config: req.Config, + SetActive: req.SetActive, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, profile) +} + +func (h *DataManagementHandler) UpdateSourceProfile(c *gin.Context) { + sourceType := strings.TrimSpace(c.Param("source_type")) + if sourceType != "postgres" && sourceType != "redis" { + response.BadRequest(c, "source_type must be postgres or redis") + return + } + profileID := strings.TrimSpace(c.Param("profile_id")) + if profileID == "" { + response.BadRequest(c, "Invalid profile_id") + return + } + + var req UpdateSourceProfileRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if !h.requireAgentEnabled(c) { + return + } + profile, err := h.dataManagementService.UpdateSourceProfile(c.Request.Context(), service.DataManagementUpdateSourceProfileInput{ + SourceType: sourceType, + ProfileID: profileID, + Name: req.Name, + Config: req.Config, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, profile) +} + +func (h *DataManagementHandler) DeleteSourceProfile(c *gin.Context) { + sourceType := strings.TrimSpace(c.Param("source_type")) + if sourceType != "postgres" && sourceType != "redis" { + response.BadRequest(c, "source_type must be postgres or redis") + return + } + profileID := strings.TrimSpace(c.Param("profile_id")) + if profileID == "" { + response.BadRequest(c, "Invalid profile_id") + return + } + + if !h.requireAgentEnabled(c) { + return + } + if err := h.dataManagementService.DeleteSourceProfile(c.Request.Context(), sourceType, profileID); err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, gin.H{"deleted": true}) +} + +func (h *DataManagementHandler) 
SetActiveSourceProfile(c *gin.Context) { + sourceType := strings.TrimSpace(c.Param("source_type")) + if sourceType != "postgres" && sourceType != "redis" { + response.BadRequest(c, "source_type must be postgres or redis") + return + } + profileID := strings.TrimSpace(c.Param("profile_id")) + if profileID == "" { + response.BadRequest(c, "Invalid profile_id") + return + } + + if !h.requireAgentEnabled(c) { + return + } + profile, err := h.dataManagementService.SetActiveSourceProfile(c.Request.Context(), sourceType, profileID) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, profile) +} + +func (h *DataManagementHandler) ListS3Profiles(c *gin.Context) { + if !h.requireAgentEnabled(c) { + return + } + + items, err := h.dataManagementService.ListS3Profiles(c.Request.Context()) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, gin.H{"items": items}) +} + +func (h *DataManagementHandler) CreateS3Profile(c *gin.Context) { + var req CreateS3ProfileRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if !h.requireAgentEnabled(c) { + return + } + + profile, err := h.dataManagementService.CreateS3Profile(c.Request.Context(), service.DataManagementCreateS3ProfileInput{ + ProfileID: req.ProfileID, + Name: req.Name, + SetActive: req.SetActive, + S3: service.DataManagementS3Config{ + Enabled: req.Enabled, + Endpoint: req.Endpoint, + Region: req.Region, + Bucket: req.Bucket, + AccessKeyID: req.AccessKeyID, + SecretAccessKey: req.SecretAccessKey, + Prefix: req.Prefix, + ForcePathStyle: req.ForcePathStyle, + UseSSL: req.UseSSL, + }, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, profile) +} + +func (h *DataManagementHandler) UpdateS3Profile(c *gin.Context) { + var req UpdateS3ProfileRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + 
return + } + + profileID := strings.TrimSpace(c.Param("profile_id")) + if profileID == "" { + response.BadRequest(c, "Invalid profile_id") + return + } + + if !h.requireAgentEnabled(c) { + return + } + + profile, err := h.dataManagementService.UpdateS3Profile(c.Request.Context(), service.DataManagementUpdateS3ProfileInput{ + ProfileID: profileID, + Name: req.Name, + S3: service.DataManagementS3Config{ + Enabled: req.Enabled, + Endpoint: req.Endpoint, + Region: req.Region, + Bucket: req.Bucket, + AccessKeyID: req.AccessKeyID, + SecretAccessKey: req.SecretAccessKey, + Prefix: req.Prefix, + ForcePathStyle: req.ForcePathStyle, + UseSSL: req.UseSSL, + }, + }) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, profile) +} + +func (h *DataManagementHandler) DeleteS3Profile(c *gin.Context) { + profileID := strings.TrimSpace(c.Param("profile_id")) + if profileID == "" { + response.BadRequest(c, "Invalid profile_id") + return + } + + if !h.requireAgentEnabled(c) { + return + } + if err := h.dataManagementService.DeleteS3Profile(c.Request.Context(), profileID); err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, gin.H{"deleted": true}) +} + +func (h *DataManagementHandler) SetActiveS3Profile(c *gin.Context) { + profileID := strings.TrimSpace(c.Param("profile_id")) + if profileID == "" { + response.BadRequest(c, "Invalid profile_id") + return + } + + if !h.requireAgentEnabled(c) { + return + } + profile, err := h.dataManagementService.SetActiveS3Profile(c.Request.Context(), profileID) + if err != nil { + response.ErrorFrom(c, err) + return + } + response.Success(c, profile) +} + func (h *DataManagementHandler) ListBackupJobs(c *gin.Context) { if !h.requireAgentEnabled(c) { return diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index 6e25835dc..db3f0b6fc 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -381,7 +381,17 @@ func 
registerDataManagementRoutes(admin *gin.RouterGroup, h *handler.Handlers) { dataManagement.GET("/agent/health", h.Admin.DataManagement.GetAgentHealth) dataManagement.GET("/config", h.Admin.DataManagement.GetConfig) dataManagement.PUT("/config", h.Admin.DataManagement.UpdateConfig) + dataManagement.GET("/sources/:source_type/profiles", h.Admin.DataManagement.ListSourceProfiles) + dataManagement.POST("/sources/:source_type/profiles", h.Admin.DataManagement.CreateSourceProfile) + dataManagement.PUT("/sources/:source_type/profiles/:profile_id", h.Admin.DataManagement.UpdateSourceProfile) + dataManagement.DELETE("/sources/:source_type/profiles/:profile_id", h.Admin.DataManagement.DeleteSourceProfile) + dataManagement.POST("/sources/:source_type/profiles/:profile_id/activate", h.Admin.DataManagement.SetActiveSourceProfile) dataManagement.POST("/s3/test", h.Admin.DataManagement.TestS3) + dataManagement.GET("/s3/profiles", h.Admin.DataManagement.ListS3Profiles) + dataManagement.POST("/s3/profiles", h.Admin.DataManagement.CreateS3Profile) + dataManagement.PUT("/s3/profiles/:profile_id", h.Admin.DataManagement.UpdateS3Profile) + dataManagement.DELETE("/s3/profiles/:profile_id", h.Admin.DataManagement.DeleteS3Profile) + dataManagement.POST("/s3/profiles/:profile_id/activate", h.Admin.DataManagement.SetActiveS3Profile) dataManagement.POST("/backups", h.Admin.DataManagement.CreateBackupJob) dataManagement.GET("/backups", h.Admin.DataManagement.ListBackupJobs) dataManagement.GET("/backups/:job_id", h.Admin.DataManagement.GetBackupJob) diff --git a/backend/internal/service/data_management_grpc.go b/backend/internal/service/data_management_grpc.go index 61c0e5cc5..4aabdfec2 100644 --- a/backend/internal/service/data_management_grpc.go +++ b/backend/internal/service/data_management_grpc.go @@ -20,6 +20,7 @@ import ( const ( backupInvalidArgumentReason = "BACKUP_INVALID_ARGUMENT" backupResourceNotFoundReason = "BACKUP_RESOURCE_NOT_FOUND" + backupResourceConflictReason = 
"BACKUP_RESOURCE_CONFLICT" backupFailedPrecondition = "BACKUP_FAILED_PRECONDITION" backupAgentTimeoutReason = "BACKUP_AGENT_TIMEOUT" backupAgentInternalReason = "BACKUP_AGENT_INTERNAL" @@ -60,14 +61,17 @@ type DataManagementS3Config struct { } type DataManagementConfig struct { - SourceMode string `json:"source_mode"` - BackupRoot string `json:"backup_root"` - SQLitePath string `json:"sqlite_path,omitempty"` - RetentionDays int32 `json:"retention_days"` - KeepLast int32 `json:"keep_last"` - Postgres DataManagementPostgresConfig `json:"postgres"` - Redis DataManagementRedisConfig `json:"redis"` - S3 DataManagementS3Config `json:"s3"` + SourceMode string `json:"source_mode"` + BackupRoot string `json:"backup_root"` + SQLitePath string `json:"sqlite_path,omitempty"` + RetentionDays int32 `json:"retention_days"` + KeepLast int32 `json:"keep_last"` + ActivePostgresID string `json:"active_postgres_profile_id"` + ActiveRedisID string `json:"active_redis_profile_id"` + Postgres DataManagementPostgresConfig `json:"postgres"` + Redis DataManagementRedisConfig `json:"redis"` + S3 DataManagementS3Config `json:"s3"` + ActiveS3ProfileID string `json:"active_s3_profile_id"` } type DataManagementTestS3Result struct { @@ -80,6 +84,9 @@ type DataManagementCreateBackupJobInput struct { UploadToS3 bool TriggeredBy string IdempotencyKey string + S3ProfileID string + PostgresID string + RedisID string } type DataManagementListBackupJobsInput struct { @@ -108,6 +115,9 @@ type DataManagementBackupJob struct { TriggeredBy string `json:"triggered_by"` IdempotencyKey string `json:"idempotency_key,omitempty"` UploadToS3 bool `json:"upload_to_s3"` + S3ProfileID string `json:"s3_profile_id,omitempty"` + PostgresID string `json:"postgres_profile_id,omitempty"` + RedisID string `json:"redis_profile_id,omitempty"` StartedAt string `json:"started_at,omitempty"` FinishedAt string `json:"finished_at,omitempty"` ErrorMessage string `json:"error_message,omitempty"` @@ -115,6 +125,68 @@ type 
DataManagementBackupJob struct { S3Object DataManagementS3ObjectInfo `json:"s3"` } +type DataManagementSourceProfile struct { + SourceType string `json:"source_type"` + ProfileID string `json:"profile_id"` + Name string `json:"name"` + IsActive bool `json:"is_active"` + Config DataManagementSourceConfig `json:"config"` + PasswordConfigured bool `json:"password_configured"` + CreatedAt string `json:"created_at,omitempty"` + UpdatedAt string `json:"updated_at,omitempty"` +} + +type DataManagementSourceConfig struct { + Host string `json:"host"` + Port int32 `json:"port"` + User string `json:"user"` + Password string `json:"password,omitempty"` + Database string `json:"database"` + SSLMode string `json:"ssl_mode"` + Addr string `json:"addr"` + Username string `json:"username"` + DB int32 `json:"db"` + ContainerName string `json:"container_name"` +} + +type DataManagementCreateSourceProfileInput struct { + SourceType string + ProfileID string + Name string + Config DataManagementSourceConfig + SetActive bool +} + +type DataManagementUpdateSourceProfileInput struct { + SourceType string + ProfileID string + Name string + Config DataManagementSourceConfig +} + +type DataManagementS3Profile struct { + ProfileID string `json:"profile_id"` + Name string `json:"name"` + IsActive bool `json:"is_active"` + S3 DataManagementS3Config `json:"s3"` + SecretAccessKeyConfigured bool `json:"secret_access_key_configured"` + CreatedAt string `json:"created_at,omitempty"` + UpdatedAt string `json:"updated_at,omitempty"` +} + +type DataManagementCreateS3ProfileInput struct { + ProfileID string + Name string + S3 DataManagementS3Config + SetActive bool +} + +type DataManagementUpdateS3ProfileInput struct { + ProfileID string + Name string + S3 DataManagementS3Config +} + type DataManagementListBackupJobsResult struct { Items []DataManagementBackupJob `json:"items"` NextPageToken string `json:"next_page_token,omitempty"` @@ -150,6 +222,120 @@ func (s *DataManagementService) UpdateConfig(ctx 
context.Context, cfg DataManage return mapProtoConfig(resp.GetConfig()), nil } +func (s *DataManagementService) ListSourceProfiles(ctx context.Context, sourceType string) ([]DataManagementSourceProfile, error) { + sourceType = strings.TrimSpace(sourceType) + if sourceType != "postgres" && sourceType != "redis" { + return nil, infraerrors.BadRequest(backupInvalidArgumentReason, "source_type must be postgres or redis") + } + + var resp *backupv1.ListSourceProfilesResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.ListSourceProfiles(callCtx, &backupv1.ListSourceProfilesRequest{ + SourceType: sourceType, + }) + return callErr + }) + if err != nil { + return nil, err + } + + items := make([]DataManagementSourceProfile, 0, len(resp.GetItems())) + for _, item := range resp.GetItems() { + items = append(items, mapProtoSourceProfile(item)) + } + return items, nil +} + +func (s *DataManagementService) CreateSourceProfile(ctx context.Context, input DataManagementCreateSourceProfileInput) (DataManagementSourceProfile, error) { + if err := validateSourceProfileInput(input.SourceType, input.ProfileID, input.Name); err != nil { + return DataManagementSourceProfile{}, err + } + + var resp *backupv1.CreateSourceProfileResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.CreateSourceProfile(callCtx, &backupv1.CreateSourceProfileRequest{ + SourceType: strings.TrimSpace(input.SourceType), + ProfileId: strings.TrimSpace(input.ProfileID), + Name: strings.TrimSpace(input.Name), + Config: mapToProtoSourceConfig(input.Config), + SetActive: input.SetActive, + }) + return callErr + }) + if err != nil { + return DataManagementSourceProfile{}, err + } + return mapProtoSourceProfile(resp.GetProfile()), nil +} + +func (s *DataManagementService) UpdateSourceProfile(ctx context.Context, input 
DataManagementUpdateSourceProfileInput) (DataManagementSourceProfile, error) { + if err := validateSourceProfileInput(input.SourceType, input.ProfileID, input.Name); err != nil { + return DataManagementSourceProfile{}, err + } + + var resp *backupv1.UpdateSourceProfileResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.UpdateSourceProfile(callCtx, &backupv1.UpdateSourceProfileRequest{ + SourceType: strings.TrimSpace(input.SourceType), + ProfileId: strings.TrimSpace(input.ProfileID), + Name: strings.TrimSpace(input.Name), + Config: mapToProtoSourceConfig(input.Config), + }) + return callErr + }) + if err != nil { + return DataManagementSourceProfile{}, err + } + return mapProtoSourceProfile(resp.GetProfile()), nil +} + +func (s *DataManagementService) DeleteSourceProfile(ctx context.Context, sourceType, profileID string) error { + sourceType = strings.TrimSpace(sourceType) + profileID = strings.TrimSpace(profileID) + if sourceType != "postgres" && sourceType != "redis" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "source_type must be postgres or redis") + } + if profileID == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + } + + return s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + _, err := client.DeleteSourceProfile(callCtx, &backupv1.DeleteSourceProfileRequest{ + SourceType: sourceType, + ProfileId: profileID, + }) + return err + }) +} + +func (s *DataManagementService) SetActiveSourceProfile(ctx context.Context, sourceType, profileID string) (DataManagementSourceProfile, error) { + sourceType = strings.TrimSpace(sourceType) + profileID = strings.TrimSpace(profileID) + if sourceType != "postgres" && sourceType != "redis" { + return DataManagementSourceProfile{}, infraerrors.BadRequest(backupInvalidArgumentReason, "source_type must be postgres or 
redis") + } + if profileID == "" { + return DataManagementSourceProfile{}, infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + } + + var resp *backupv1.SetActiveSourceProfileResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.SetActiveSourceProfile(callCtx, &backupv1.SetActiveSourceProfileRequest{ + SourceType: sourceType, + ProfileId: profileID, + }) + return callErr + }) + if err != nil { + return DataManagementSourceProfile{}, err + } + return mapProtoSourceProfile(resp.GetProfile()), nil +} + func (s *DataManagementService) ValidateS3(ctx context.Context, cfg DataManagementS3Config) (DataManagementTestS3Result, error) { if err := validateS3Config(cfg); err != nil { return DataManagementTestS3Result{}, err @@ -179,15 +365,109 @@ func (s *DataManagementService) ValidateS3(ctx context.Context, cfg DataManageme return DataManagementTestS3Result{OK: resp.GetOk(), Message: resp.GetMessage()}, nil } +func (s *DataManagementService) ListS3Profiles(ctx context.Context) ([]DataManagementS3Profile, error) { + var resp *backupv1.ListS3ProfilesResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.ListS3Profiles(callCtx, &backupv1.ListS3ProfilesRequest{}) + return callErr + }) + if err != nil { + return nil, err + } + + items := make([]DataManagementS3Profile, 0, len(resp.GetItems())) + for _, item := range resp.GetItems() { + items = append(items, mapProtoS3Profile(item)) + } + return items, nil +} + +func (s *DataManagementService) CreateS3Profile(ctx context.Context, input DataManagementCreateS3ProfileInput) (DataManagementS3Profile, error) { + if err := validateS3ProfileInput(input.ProfileID, input.Name, input.S3); err != nil { + return DataManagementS3Profile{}, err + } + + var resp *backupv1.CreateS3ProfileResponse + err := 
s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.CreateS3Profile(callCtx, &backupv1.CreateS3ProfileRequest{ + ProfileId: strings.TrimSpace(input.ProfileID), + Name: strings.TrimSpace(input.Name), + S3: mapToProtoS3Config(input.S3), + SetActive: input.SetActive, + }) + return callErr + }) + if err != nil { + return DataManagementS3Profile{}, err + } + return mapProtoS3Profile(resp.GetProfile()), nil +} + +func (s *DataManagementService) UpdateS3Profile(ctx context.Context, input DataManagementUpdateS3ProfileInput) (DataManagementS3Profile, error) { + if err := validateS3ProfileInput(input.ProfileID, input.Name, input.S3); err != nil { + return DataManagementS3Profile{}, err + } + + var resp *backupv1.UpdateS3ProfileResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.UpdateS3Profile(callCtx, &backupv1.UpdateS3ProfileRequest{ + ProfileId: strings.TrimSpace(input.ProfileID), + Name: strings.TrimSpace(input.Name), + S3: mapToProtoS3Config(input.S3), + }) + return callErr + }) + if err != nil { + return DataManagementS3Profile{}, err + } + return mapProtoS3Profile(resp.GetProfile()), nil +} + +func (s *DataManagementService) DeleteS3Profile(ctx context.Context, profileID string) error { + profileID = strings.TrimSpace(profileID) + if profileID == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + } + + return s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + _, err := client.DeleteS3Profile(callCtx, &backupv1.DeleteS3ProfileRequest{ProfileId: profileID}) + return err + }) +} + +func (s *DataManagementService) SetActiveS3Profile(ctx context.Context, profileID string) (DataManagementS3Profile, error) { + profileID = strings.TrimSpace(profileID) + if profileID == "" { + return 
DataManagementS3Profile{}, infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + } + + var resp *backupv1.SetActiveS3ProfileResponse + err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { + var callErr error + resp, callErr = client.SetActiveS3Profile(callCtx, &backupv1.SetActiveS3ProfileRequest{ProfileId: profileID}) + return callErr + }) + if err != nil { + return DataManagementS3Profile{}, err + } + return mapProtoS3Profile(resp.GetProfile()), nil +} + func (s *DataManagementService) CreateBackupJob(ctx context.Context, input DataManagementCreateBackupJobInput) (DataManagementBackupJob, error) { var resp *backupv1.CreateBackupJobResponse err := s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { var callErr error resp, callErr = client.CreateBackupJob(callCtx, &backupv1.CreateBackupJobRequest{ - BackupType: strings.TrimSpace(input.BackupType), - UploadToS3: input.UploadToS3, - TriggeredBy: strings.TrimSpace(input.TriggeredBy), - IdempotencyKey: strings.TrimSpace(input.IdempotencyKey), + BackupType: strings.TrimSpace(input.BackupType), + UploadToS3: input.UploadToS3, + TriggeredBy: strings.TrimSpace(input.TriggeredBy), + IdempotencyKey: strings.TrimSpace(input.IdempotencyKey), + S3ProfileId: strings.TrimSpace(input.S3ProfileID), + PostgresProfileId: strings.TrimSpace(input.PostgresID), + RedisProfileId: strings.TrimSpace(input.RedisID), }) return callErr }) @@ -287,6 +567,8 @@ func mapBackupGRPCError(err error, socketPath string) error { switch st.Code() { case codes.InvalidArgument: return infraerrors.BadRequest(backupInvalidArgumentReason, st.Message()) + case codes.AlreadyExists: + return infraerrors.New(409, backupResourceConflictReason, st.Message()) case codes.NotFound: return infraerrors.NotFound(backupResourceNotFoundReason, st.Message()) case codes.FailedPrecondition: @@ -313,11 +595,14 @@ func mapProtoConfig(cfg *backupv1.BackupConfig) 
DataManagementConfig { redis := cfg.GetRedis() s3Cfg := cfg.GetS3() return DataManagementConfig{ - SourceMode: cfg.GetSourceMode(), - BackupRoot: cfg.GetBackupRoot(), - SQLitePath: cfg.GetSqlitePath(), - RetentionDays: cfg.GetRetentionDays(), - KeepLast: cfg.GetKeepLast(), + SourceMode: cfg.GetSourceMode(), + BackupRoot: cfg.GetBackupRoot(), + SQLitePath: cfg.GetSqlitePath(), + RetentionDays: cfg.GetRetentionDays(), + KeepLast: cfg.GetKeepLast(), + ActivePostgresID: cfg.GetActivePostgresProfileId(), + ActiveRedisID: cfg.GetActiveRedisProfileId(), + ActiveS3ProfileID: cfg.GetActiveS3ProfileId(), Postgres: DataManagementPostgresConfig{ Host: postgres.GetHost(), Port: postgres.GetPort(), @@ -350,11 +635,14 @@ func mapProtoConfig(cfg *backupv1.BackupConfig) DataManagementConfig { func mapToProtoConfig(cfg DataManagementConfig) *backupv1.BackupConfig { return &backupv1.BackupConfig{ - SourceMode: strings.TrimSpace(cfg.SourceMode), - BackupRoot: strings.TrimSpace(cfg.BackupRoot), - SqlitePath: strings.TrimSpace(cfg.SQLitePath), - RetentionDays: cfg.RetentionDays, - KeepLast: cfg.KeepLast, + SourceMode: strings.TrimSpace(cfg.SourceMode), + BackupRoot: strings.TrimSpace(cfg.BackupRoot), + SqlitePath: strings.TrimSpace(cfg.SQLitePath), + RetentionDays: cfg.RetentionDays, + KeepLast: cfg.KeepLast, + ActivePostgresProfileId: strings.TrimSpace(cfg.ActivePostgresID), + ActiveRedisProfileId: strings.TrimSpace(cfg.ActiveRedisID), + ActiveS3ProfileId: strings.TrimSpace(cfg.ActiveS3ProfileID), Postgres: &backupv1.SourceConfig{ Host: strings.TrimSpace(cfg.Postgres.Host), Port: cfg.Postgres.Port, @@ -371,17 +659,95 @@ func mapToProtoConfig(cfg DataManagementConfig) *backupv1.BackupConfig { Db: cfg.Redis.DB, ContainerName: strings.TrimSpace(cfg.Redis.ContainerName), }, - S3: &backupv1.S3Config{ - Enabled: cfg.S3.Enabled, - Endpoint: strings.TrimSpace(cfg.S3.Endpoint), - Region: strings.TrimSpace(cfg.S3.Region), - Bucket: strings.TrimSpace(cfg.S3.Bucket), - AccessKeyId: 
strings.TrimSpace(cfg.S3.AccessKeyID), - SecretAccessKey: strings.TrimSpace(cfg.S3.SecretAccessKey), - Prefix: strings.Trim(strings.TrimSpace(cfg.S3.Prefix), "/"), - ForcePathStyle: cfg.S3.ForcePathStyle, - UseSsl: cfg.S3.UseSSL, + S3: mapToProtoS3Config(cfg.S3), + } +} + +func mapToProtoS3Config(cfg DataManagementS3Config) *backupv1.S3Config { + return &backupv1.S3Config{ + Enabled: cfg.Enabled, + Endpoint: strings.TrimSpace(cfg.Endpoint), + Region: strings.TrimSpace(cfg.Region), + Bucket: strings.TrimSpace(cfg.Bucket), + AccessKeyId: strings.TrimSpace(cfg.AccessKeyID), + SecretAccessKey: strings.TrimSpace(cfg.SecretAccessKey), + Prefix: strings.Trim(strings.TrimSpace(cfg.Prefix), "/"), + ForcePathStyle: cfg.ForcePathStyle, + UseSsl: cfg.UseSSL, + } +} + +func mapToProtoSourceConfig(cfg DataManagementSourceConfig) *backupv1.SourceConfig { + return &backupv1.SourceConfig{ + Host: strings.TrimSpace(cfg.Host), + Port: cfg.Port, + User: strings.TrimSpace(cfg.User), + Password: strings.TrimSpace(cfg.Password), + Database: strings.TrimSpace(cfg.Database), + SslMode: strings.TrimSpace(cfg.SSLMode), + Addr: strings.TrimSpace(cfg.Addr), + Username: strings.TrimSpace(cfg.Username), + Db: cfg.DB, + ContainerName: strings.TrimSpace(cfg.ContainerName), + } +} + +func mapProtoS3Profile(profile *backupv1.S3Profile) DataManagementS3Profile { + if profile == nil { + return DataManagementS3Profile{} + } + s3Cfg := profile.GetS3() + if s3Cfg == nil { + s3Cfg = &backupv1.S3Config{} + } + return DataManagementS3Profile{ + ProfileID: profile.GetProfileId(), + Name: profile.GetName(), + IsActive: profile.GetIsActive(), + S3: DataManagementS3Config{ + Enabled: s3Cfg.GetEnabled(), + Endpoint: s3Cfg.GetEndpoint(), + Region: s3Cfg.GetRegion(), + Bucket: s3Cfg.GetBucket(), + AccessKeyID: s3Cfg.GetAccessKeyId(), + SecretAccessKeyConfigured: profile.GetSecretAccessKeyConfigured() || strings.TrimSpace(s3Cfg.GetSecretAccessKey()) != "", + Prefix: s3Cfg.GetPrefix(), + ForcePathStyle: 
s3Cfg.GetForcePathStyle(), + UseSSL: s3Cfg.GetUseSsl(), + }, + SecretAccessKeyConfigured: profile.GetSecretAccessKeyConfigured() || strings.TrimSpace(s3Cfg.GetSecretAccessKey()) != "", + CreatedAt: strings.TrimSpace(profile.GetCreatedAt()), + UpdatedAt: strings.TrimSpace(profile.GetUpdatedAt()), + } +} + +func mapProtoSourceProfile(profile *backupv1.SourceProfile) DataManagementSourceProfile { + if profile == nil { + return DataManagementSourceProfile{} + } + sourceCfg := profile.GetConfig() + if sourceCfg == nil { + sourceCfg = &backupv1.SourceConfig{} + } + return DataManagementSourceProfile{ + SourceType: profile.GetSourceType(), + ProfileID: profile.GetProfileId(), + Name: profile.GetName(), + IsActive: profile.GetIsActive(), + Config: DataManagementSourceConfig{ + Host: sourceCfg.GetHost(), + Port: sourceCfg.GetPort(), + User: sourceCfg.GetUser(), + Database: sourceCfg.GetDatabase(), + SSLMode: sourceCfg.GetSslMode(), + Addr: sourceCfg.GetAddr(), + Username: sourceCfg.GetUsername(), + DB: sourceCfg.GetDb(), + ContainerName: sourceCfg.GetContainerName(), }, + PasswordConfigured: profile.GetPasswordConfigured(), + CreatedAt: strings.TrimSpace(profile.GetCreatedAt()), + UpdatedAt: strings.TrimSpace(profile.GetUpdatedAt()), } } @@ -415,6 +781,9 @@ func mapProtoJob(job *backupv1.BackupJob) DataManagementBackupJob { TriggeredBy: job.GetTriggeredBy(), IdempotencyKey: job.GetIdempotencyKey(), UploadToS3: job.GetUploadToS3(), + S3ProfileID: job.GetS3ProfileId(), + PostgresID: job.GetPostgresProfileId(), + RedisID: job.GetRedisProfileId(), StartedAt: job.GetStartedAt(), FinishedAt: job.GetFinishedAt(), ErrorMessage: job.GetErrorMessage(), @@ -482,6 +851,33 @@ func validateS3Config(cfg DataManagementS3Config) error { return nil } +func validateS3ProfileInput(profileID, name string, s3Cfg DataManagementS3Config) error { + if strings.TrimSpace(profileID) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + } + if 
strings.TrimSpace(name) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "name is required") + } + if s3Cfg.Enabled { + return validateS3Config(s3Cfg) + } + return nil +} + +func validateSourceProfileInput(sourceType, profileID, name string) error { + sourceType = strings.TrimSpace(sourceType) + if sourceType != "postgres" && sourceType != "redis" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "source_type must be postgres or redis") + } + if strings.TrimSpace(profileID) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + } + if strings.TrimSpace(name) == "" { + return infraerrors.BadRequest(backupInvalidArgumentReason, "name is required") + } + return nil +} + func (s *DataManagementService) probeBackupHealth(ctx context.Context) (*DataManagementAgentInfo, error) { socketPath := s.SocketPath() dialCtx, dialCancel := context.WithTimeout(ctx, s.dialTimeout) diff --git a/backend/internal/service/data_management_grpc_test.go b/backend/internal/service/data_management_grpc_test.go index b65379b1a..5461f09ac 100644 --- a/backend/internal/service/data_management_grpc_test.go +++ b/backend/internal/service/data_management_grpc_test.go @@ -32,6 +32,12 @@ func TestMapBackupGRPCError(t *testing.T) { wantCode: 404, wantReason: backupResourceNotFoundReason, }, + { + name: "already exists", + err: grpcstatus.Error(codes.AlreadyExists, "exists"), + wantCode: 409, + wantReason: backupResourceConflictReason, + }, { name: "failed precondition", err: grpcstatus.Error(codes.FailedPrecondition, "precondition failed"), diff --git a/backup/ent/backupjob.go b/backup/ent/backupjob.go index 4079d4153..e18839be3 100644 --- a/backup/ent/backupjob.go +++ b/backup/ent/backupjob.go @@ -29,6 +29,12 @@ type BackupJob struct { IdempotencyKey string `json:"idempotency_key,omitempty"` // UploadToS3 holds the value of the "upload_to_s3" field. 
UploadToS3 bool `json:"upload_to_s3,omitempty"` + // S3ProfileID holds the value of the "s3_profile_id" field. + S3ProfileID string `json:"s3_profile_id,omitempty"` + // PostgresProfileID holds the value of the "postgres_profile_id" field. + PostgresProfileID string `json:"postgres_profile_id,omitempty"` + // RedisProfileID holds the value of the "redis_profile_id" field. + RedisProfileID string `json:"redis_profile_id,omitempty"` // StartedAt holds the value of the "started_at" field. StartedAt *time.Time `json:"started_at,omitempty"` // FinishedAt holds the value of the "finished_at" field. @@ -84,7 +90,7 @@ func (*BackupJob) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullBool) case backupjob.FieldID, backupjob.FieldArtifactSizeBytes: values[i] = new(sql.NullInt64) - case backupjob.FieldJobID, backupjob.FieldBackupType, backupjob.FieldStatus, backupjob.FieldTriggeredBy, backupjob.FieldIdempotencyKey, backupjob.FieldErrorMessage, backupjob.FieldArtifactLocalPath, backupjob.FieldArtifactSha256, backupjob.FieldS3Bucket, backupjob.FieldS3Key, backupjob.FieldS3Etag: + case backupjob.FieldJobID, backupjob.FieldBackupType, backupjob.FieldStatus, backupjob.FieldTriggeredBy, backupjob.FieldIdempotencyKey, backupjob.FieldS3ProfileID, backupjob.FieldPostgresProfileID, backupjob.FieldRedisProfileID, backupjob.FieldErrorMessage, backupjob.FieldArtifactLocalPath, backupjob.FieldArtifactSha256, backupjob.FieldS3Bucket, backupjob.FieldS3Key, backupjob.FieldS3Etag: values[i] = new(sql.NullString) case backupjob.FieldStartedAt, backupjob.FieldFinishedAt, backupjob.FieldCreatedAt, backupjob.FieldUpdatedAt: values[i] = new(sql.NullTime) @@ -145,6 +151,24 @@ func (_m *BackupJob) assignValues(columns []string, values []any) error { } else if value.Valid { _m.UploadToS3 = value.Bool } + case backupjob.FieldS3ProfileID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field s3_profile_id", values[i]) + } else if 
value.Valid { + _m.S3ProfileID = value.String + } + case backupjob.FieldPostgresProfileID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field postgres_profile_id", values[i]) + } else if value.Valid { + _m.PostgresProfileID = value.String + } + case backupjob.FieldRedisProfileID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field redis_profile_id", values[i]) + } else if value.Valid { + _m.RedisProfileID = value.String + } case backupjob.FieldStartedAt: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field started_at", values[i]) @@ -273,6 +297,15 @@ func (_m *BackupJob) String() string { builder.WriteString("upload_to_s3=") builder.WriteString(fmt.Sprintf("%v", _m.UploadToS3)) builder.WriteString(", ") + builder.WriteString("s3_profile_id=") + builder.WriteString(_m.S3ProfileID) + builder.WriteString(", ") + builder.WriteString("postgres_profile_id=") + builder.WriteString(_m.PostgresProfileID) + builder.WriteString(", ") + builder.WriteString("redis_profile_id=") + builder.WriteString(_m.RedisProfileID) + builder.WriteString(", ") if v := _m.StartedAt; v != nil { builder.WriteString("started_at=") builder.WriteString(v.Format(time.ANSIC)) diff --git a/backup/ent/backupjob/backupjob.go b/backup/ent/backupjob/backupjob.go index 2411e00b2..1f759a8a6 100644 --- a/backup/ent/backupjob/backupjob.go +++ b/backup/ent/backupjob/backupjob.go @@ -27,6 +27,12 @@ const ( FieldIdempotencyKey = "idempotency_key" // FieldUploadToS3 holds the string denoting the upload_to_s3 field in the database. FieldUploadToS3 = "upload_to_s3" + // FieldS3ProfileID holds the string denoting the s3_profile_id field in the database. + FieldS3ProfileID = "s3_profile_id" + // FieldPostgresProfileID holds the string denoting the postgres_profile_id field in the database. 
+ FieldPostgresProfileID = "postgres_profile_id" + // FieldRedisProfileID holds the string denoting the redis_profile_id field in the database. + FieldRedisProfileID = "redis_profile_id" // FieldStartedAt holds the string denoting the started_at field in the database. FieldStartedAt = "started_at" // FieldFinishedAt holds the string denoting the finished_at field in the database. @@ -71,6 +77,9 @@ var Columns = []string{ FieldTriggeredBy, FieldIdempotencyKey, FieldUploadToS3, + FieldS3ProfileID, + FieldPostgresProfileID, + FieldRedisProfileID, FieldStartedAt, FieldFinishedAt, FieldErrorMessage, @@ -198,6 +207,21 @@ func ByUploadToS3(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldUploadToS3, opts...).ToFunc() } +// ByS3ProfileID orders the results by the s3_profile_id field. +func ByS3ProfileID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldS3ProfileID, opts...).ToFunc() +} + +// ByPostgresProfileID orders the results by the postgres_profile_id field. +func ByPostgresProfileID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPostgresProfileID, opts...).ToFunc() +} + +// ByRedisProfileID orders the results by the redis_profile_id field. +func ByRedisProfileID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRedisProfileID, opts...).ToFunc() +} + // ByStartedAt orders the results by the started_at field. func ByStartedAt(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldStartedAt, opts...).ToFunc() diff --git a/backup/ent/backupjob/where.go b/backup/ent/backupjob/where.go index ef02344f3..816399de2 100644 --- a/backup/ent/backupjob/where.go +++ b/backup/ent/backupjob/where.go @@ -75,6 +75,21 @@ func UploadToS3(v bool) predicate.BackupJob { return predicate.BackupJob(sql.FieldEQ(FieldUploadToS3, v)) } +// S3ProfileID applies equality check predicate on the "s3_profile_id" field. It's identical to S3ProfileIDEQ. 
+func S3ProfileID(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldS3ProfileID, v)) +} + +// PostgresProfileID applies equality check predicate on the "postgres_profile_id" field. It's identical to PostgresProfileIDEQ. +func PostgresProfileID(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldPostgresProfileID, v)) +} + +// RedisProfileID applies equality check predicate on the "redis_profile_id" field. It's identical to RedisProfileIDEQ. +func RedisProfileID(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldRedisProfileID, v)) +} + // StartedAt applies equality check predicate on the "started_at" field. It's identical to StartedAtEQ. func StartedAt(v time.Time) predicate.BackupJob { return predicate.BackupJob(sql.FieldEQ(FieldStartedAt, v)) @@ -385,6 +400,231 @@ func UploadToS3NEQ(v bool) predicate.BackupJob { return predicate.BackupJob(sql.FieldNEQ(FieldUploadToS3, v)) } +// S3ProfileIDEQ applies the EQ predicate on the "s3_profile_id" field. +func S3ProfileIDEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldS3ProfileID, v)) +} + +// S3ProfileIDNEQ applies the NEQ predicate on the "s3_profile_id" field. +func S3ProfileIDNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldS3ProfileID, v)) +} + +// S3ProfileIDIn applies the In predicate on the "s3_profile_id" field. +func S3ProfileIDIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldS3ProfileID, vs...)) +} + +// S3ProfileIDNotIn applies the NotIn predicate on the "s3_profile_id" field. +func S3ProfileIDNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldS3ProfileID, vs...)) +} + +// S3ProfileIDGT applies the GT predicate on the "s3_profile_id" field. 
+func S3ProfileIDGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldS3ProfileID, v)) +} + +// S3ProfileIDGTE applies the GTE predicate on the "s3_profile_id" field. +func S3ProfileIDGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldS3ProfileID, v)) +} + +// S3ProfileIDLT applies the LT predicate on the "s3_profile_id" field. +func S3ProfileIDLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldS3ProfileID, v)) +} + +// S3ProfileIDLTE applies the LTE predicate on the "s3_profile_id" field. +func S3ProfileIDLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldS3ProfileID, v)) +} + +// S3ProfileIDContains applies the Contains predicate on the "s3_profile_id" field. +func S3ProfileIDContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldS3ProfileID, v)) +} + +// S3ProfileIDHasPrefix applies the HasPrefix predicate on the "s3_profile_id" field. +func S3ProfileIDHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldS3ProfileID, v)) +} + +// S3ProfileIDHasSuffix applies the HasSuffix predicate on the "s3_profile_id" field. +func S3ProfileIDHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldS3ProfileID, v)) +} + +// S3ProfileIDIsNil applies the IsNil predicate on the "s3_profile_id" field. +func S3ProfileIDIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldS3ProfileID)) +} + +// S3ProfileIDNotNil applies the NotNil predicate on the "s3_profile_id" field. +func S3ProfileIDNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldS3ProfileID)) +} + +// S3ProfileIDEqualFold applies the EqualFold predicate on the "s3_profile_id" field. 
+func S3ProfileIDEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldS3ProfileID, v)) +} + +// S3ProfileIDContainsFold applies the ContainsFold predicate on the "s3_profile_id" field. +func S3ProfileIDContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldS3ProfileID, v)) +} + +// PostgresProfileIDEQ applies the EQ predicate on the "postgres_profile_id" field. +func PostgresProfileIDEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldPostgresProfileID, v)) +} + +// PostgresProfileIDNEQ applies the NEQ predicate on the "postgres_profile_id" field. +func PostgresProfileIDNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldPostgresProfileID, v)) +} + +// PostgresProfileIDIn applies the In predicate on the "postgres_profile_id" field. +func PostgresProfileIDIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldPostgresProfileID, vs...)) +} + +// PostgresProfileIDNotIn applies the NotIn predicate on the "postgres_profile_id" field. +func PostgresProfileIDNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldPostgresProfileID, vs...)) +} + +// PostgresProfileIDGT applies the GT predicate on the "postgres_profile_id" field. +func PostgresProfileIDGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldPostgresProfileID, v)) +} + +// PostgresProfileIDGTE applies the GTE predicate on the "postgres_profile_id" field. +func PostgresProfileIDGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldPostgresProfileID, v)) +} + +// PostgresProfileIDLT applies the LT predicate on the "postgres_profile_id" field. 
+func PostgresProfileIDLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldPostgresProfileID, v)) +} + +// PostgresProfileIDLTE applies the LTE predicate on the "postgres_profile_id" field. +func PostgresProfileIDLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldPostgresProfileID, v)) +} + +// PostgresProfileIDContains applies the Contains predicate on the "postgres_profile_id" field. +func PostgresProfileIDContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldPostgresProfileID, v)) +} + +// PostgresProfileIDHasPrefix applies the HasPrefix predicate on the "postgres_profile_id" field. +func PostgresProfileIDHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldPostgresProfileID, v)) +} + +// PostgresProfileIDHasSuffix applies the HasSuffix predicate on the "postgres_profile_id" field. +func PostgresProfileIDHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldPostgresProfileID, v)) +} + +// PostgresProfileIDIsNil applies the IsNil predicate on the "postgres_profile_id" field. +func PostgresProfileIDIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldPostgresProfileID)) +} + +// PostgresProfileIDNotNil applies the NotNil predicate on the "postgres_profile_id" field. +func PostgresProfileIDNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldPostgresProfileID)) +} + +// PostgresProfileIDEqualFold applies the EqualFold predicate on the "postgres_profile_id" field. +func PostgresProfileIDEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldPostgresProfileID, v)) +} + +// PostgresProfileIDContainsFold applies the ContainsFold predicate on the "postgres_profile_id" field. 
+func PostgresProfileIDContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldPostgresProfileID, v)) +} + +// RedisProfileIDEQ applies the EQ predicate on the "redis_profile_id" field. +func RedisProfileIDEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEQ(FieldRedisProfileID, v)) +} + +// RedisProfileIDNEQ applies the NEQ predicate on the "redis_profile_id" field. +func RedisProfileIDNEQ(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNEQ(FieldRedisProfileID, v)) +} + +// RedisProfileIDIn applies the In predicate on the "redis_profile_id" field. +func RedisProfileIDIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldIn(FieldRedisProfileID, vs...)) +} + +// RedisProfileIDNotIn applies the NotIn predicate on the "redis_profile_id" field. +func RedisProfileIDNotIn(vs ...string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotIn(FieldRedisProfileID, vs...)) +} + +// RedisProfileIDGT applies the GT predicate on the "redis_profile_id" field. +func RedisProfileIDGT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGT(FieldRedisProfileID, v)) +} + +// RedisProfileIDGTE applies the GTE predicate on the "redis_profile_id" field. +func RedisProfileIDGTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldGTE(FieldRedisProfileID, v)) +} + +// RedisProfileIDLT applies the LT predicate on the "redis_profile_id" field. +func RedisProfileIDLT(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLT(FieldRedisProfileID, v)) +} + +// RedisProfileIDLTE applies the LTE predicate on the "redis_profile_id" field. +func RedisProfileIDLTE(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldLTE(FieldRedisProfileID, v)) +} + +// RedisProfileIDContains applies the Contains predicate on the "redis_profile_id" field. 
+func RedisProfileIDContains(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContains(FieldRedisProfileID, v)) +} + +// RedisProfileIDHasPrefix applies the HasPrefix predicate on the "redis_profile_id" field. +func RedisProfileIDHasPrefix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasPrefix(FieldRedisProfileID, v)) +} + +// RedisProfileIDHasSuffix applies the HasSuffix predicate on the "redis_profile_id" field. +func RedisProfileIDHasSuffix(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldHasSuffix(FieldRedisProfileID, v)) +} + +// RedisProfileIDIsNil applies the IsNil predicate on the "redis_profile_id" field. +func RedisProfileIDIsNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldIsNull(FieldRedisProfileID)) +} + +// RedisProfileIDNotNil applies the NotNil predicate on the "redis_profile_id" field. +func RedisProfileIDNotNil() predicate.BackupJob { + return predicate.BackupJob(sql.FieldNotNull(FieldRedisProfileID)) +} + +// RedisProfileIDEqualFold applies the EqualFold predicate on the "redis_profile_id" field. +func RedisProfileIDEqualFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldEqualFold(FieldRedisProfileID, v)) +} + +// RedisProfileIDContainsFold applies the ContainsFold predicate on the "redis_profile_id" field. +func RedisProfileIDContainsFold(v string) predicate.BackupJob { + return predicate.BackupJob(sql.FieldContainsFold(FieldRedisProfileID, v)) +} + // StartedAtEQ applies the EQ predicate on the "started_at" field. 
func StartedAtEQ(v time.Time) predicate.BackupJob { return predicate.BackupJob(sql.FieldEQ(FieldStartedAt, v)) diff --git a/backup/ent/backupjob_create.go b/backup/ent/backupjob_create.go index 460996fe4..8bc60bba8 100644 --- a/backup/ent/backupjob_create.go +++ b/backup/ent/backupjob_create.go @@ -89,6 +89,48 @@ func (_c *BackupJobCreate) SetNillableUploadToS3(v *bool) *BackupJobCreate { return _c } +// SetS3ProfileID sets the "s3_profile_id" field. +func (_c *BackupJobCreate) SetS3ProfileID(v string) *BackupJobCreate { + _c.mutation.SetS3ProfileID(v) + return _c +} + +// SetNillableS3ProfileID sets the "s3_profile_id" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableS3ProfileID(v *string) *BackupJobCreate { + if v != nil { + _c.SetS3ProfileID(*v) + } + return _c +} + +// SetPostgresProfileID sets the "postgres_profile_id" field. +func (_c *BackupJobCreate) SetPostgresProfileID(v string) *BackupJobCreate { + _c.mutation.SetPostgresProfileID(v) + return _c +} + +// SetNillablePostgresProfileID sets the "postgres_profile_id" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillablePostgresProfileID(v *string) *BackupJobCreate { + if v != nil { + _c.SetPostgresProfileID(*v) + } + return _c +} + +// SetRedisProfileID sets the "redis_profile_id" field. +func (_c *BackupJobCreate) SetRedisProfileID(v string) *BackupJobCreate { + _c.mutation.SetRedisProfileID(v) + return _c +} + +// SetNillableRedisProfileID sets the "redis_profile_id" field if the given value is not nil. +func (_c *BackupJobCreate) SetNillableRedisProfileID(v *string) *BackupJobCreate { + if v != nil { + _c.SetRedisProfileID(*v) + } + return _c +} + // SetStartedAt sets the "started_at" field. 
func (_c *BackupJobCreate) SetStartedAt(v time.Time) *BackupJobCreate { _c.mutation.SetStartedAt(v) @@ -398,6 +440,18 @@ func (_c *BackupJobCreate) createSpec() (*BackupJob, *sqlgraph.CreateSpec) { _spec.SetField(backupjob.FieldUploadToS3, field.TypeBool, value) _node.UploadToS3 = value } + if value, ok := _c.mutation.S3ProfileID(); ok { + _spec.SetField(backupjob.FieldS3ProfileID, field.TypeString, value) + _node.S3ProfileID = value + } + if value, ok := _c.mutation.PostgresProfileID(); ok { + _spec.SetField(backupjob.FieldPostgresProfileID, field.TypeString, value) + _node.PostgresProfileID = value + } + if value, ok := _c.mutation.RedisProfileID(); ok { + _spec.SetField(backupjob.FieldRedisProfileID, field.TypeString, value) + _node.RedisProfileID = value + } if value, ok := _c.mutation.StartedAt(); ok { _spec.SetField(backupjob.FieldStartedAt, field.TypeTime, value) _node.StartedAt = &value diff --git a/backup/ent/backupjob_update.go b/backup/ent/backupjob_update.go index 5eeb5d765..59b4230f7 100644 --- a/backup/ent/backupjob_update.go +++ b/backup/ent/backupjob_update.go @@ -119,6 +119,66 @@ func (_u *BackupJobUpdate) SetNillableUploadToS3(v *bool) *BackupJobUpdate { return _u } +// SetS3ProfileID sets the "s3_profile_id" field. +func (_u *BackupJobUpdate) SetS3ProfileID(v string) *BackupJobUpdate { + _u.mutation.SetS3ProfileID(v) + return _u +} + +// SetNillableS3ProfileID sets the "s3_profile_id" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableS3ProfileID(v *string) *BackupJobUpdate { + if v != nil { + _u.SetS3ProfileID(*v) + } + return _u +} + +// ClearS3ProfileID clears the value of the "s3_profile_id" field. +func (_u *BackupJobUpdate) ClearS3ProfileID() *BackupJobUpdate { + _u.mutation.ClearS3ProfileID() + return _u +} + +// SetPostgresProfileID sets the "postgres_profile_id" field. 
+func (_u *BackupJobUpdate) SetPostgresProfileID(v string) *BackupJobUpdate { + _u.mutation.SetPostgresProfileID(v) + return _u +} + +// SetNillablePostgresProfileID sets the "postgres_profile_id" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillablePostgresProfileID(v *string) *BackupJobUpdate { + if v != nil { + _u.SetPostgresProfileID(*v) + } + return _u +} + +// ClearPostgresProfileID clears the value of the "postgres_profile_id" field. +func (_u *BackupJobUpdate) ClearPostgresProfileID() *BackupJobUpdate { + _u.mutation.ClearPostgresProfileID() + return _u +} + +// SetRedisProfileID sets the "redis_profile_id" field. +func (_u *BackupJobUpdate) SetRedisProfileID(v string) *BackupJobUpdate { + _u.mutation.SetRedisProfileID(v) + return _u +} + +// SetNillableRedisProfileID sets the "redis_profile_id" field if the given value is not nil. +func (_u *BackupJobUpdate) SetNillableRedisProfileID(v *string) *BackupJobUpdate { + if v != nil { + _u.SetRedisProfileID(*v) + } + return _u +} + +// ClearRedisProfileID clears the value of the "redis_profile_id" field. +func (_u *BackupJobUpdate) ClearRedisProfileID() *BackupJobUpdate { + _u.mutation.ClearRedisProfileID() + return _u +} + // SetStartedAt sets the "started_at" field. 
func (_u *BackupJobUpdate) SetStartedAt(v time.Time) *BackupJobUpdate { _u.mutation.SetStartedAt(v) @@ -437,6 +497,24 @@ func (_u *BackupJobUpdate) sqlSave(ctx context.Context) (_node int, err error) { if value, ok := _u.mutation.UploadToS3(); ok { _spec.SetField(backupjob.FieldUploadToS3, field.TypeBool, value) } + if value, ok := _u.mutation.S3ProfileID(); ok { + _spec.SetField(backupjob.FieldS3ProfileID, field.TypeString, value) + } + if _u.mutation.S3ProfileIDCleared() { + _spec.ClearField(backupjob.FieldS3ProfileID, field.TypeString) + } + if value, ok := _u.mutation.PostgresProfileID(); ok { + _spec.SetField(backupjob.FieldPostgresProfileID, field.TypeString, value) + } + if _u.mutation.PostgresProfileIDCleared() { + _spec.ClearField(backupjob.FieldPostgresProfileID, field.TypeString) + } + if value, ok := _u.mutation.RedisProfileID(); ok { + _spec.SetField(backupjob.FieldRedisProfileID, field.TypeString, value) + } + if _u.mutation.RedisProfileIDCleared() { + _spec.ClearField(backupjob.FieldRedisProfileID, field.TypeString) + } if value, ok := _u.mutation.StartedAt(); ok { _spec.SetField(backupjob.FieldStartedAt, field.TypeTime, value) } @@ -652,6 +730,66 @@ func (_u *BackupJobUpdateOne) SetNillableUploadToS3(v *bool) *BackupJobUpdateOne return _u } +// SetS3ProfileID sets the "s3_profile_id" field. +func (_u *BackupJobUpdateOne) SetS3ProfileID(v string) *BackupJobUpdateOne { + _u.mutation.SetS3ProfileID(v) + return _u +} + +// SetNillableS3ProfileID sets the "s3_profile_id" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableS3ProfileID(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetS3ProfileID(*v) + } + return _u +} + +// ClearS3ProfileID clears the value of the "s3_profile_id" field. +func (_u *BackupJobUpdateOne) ClearS3ProfileID() *BackupJobUpdateOne { + _u.mutation.ClearS3ProfileID() + return _u +} + +// SetPostgresProfileID sets the "postgres_profile_id" field. 
+func (_u *BackupJobUpdateOne) SetPostgresProfileID(v string) *BackupJobUpdateOne { + _u.mutation.SetPostgresProfileID(v) + return _u +} + +// SetNillablePostgresProfileID sets the "postgres_profile_id" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillablePostgresProfileID(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetPostgresProfileID(*v) + } + return _u +} + +// ClearPostgresProfileID clears the value of the "postgres_profile_id" field. +func (_u *BackupJobUpdateOne) ClearPostgresProfileID() *BackupJobUpdateOne { + _u.mutation.ClearPostgresProfileID() + return _u +} + +// SetRedisProfileID sets the "redis_profile_id" field. +func (_u *BackupJobUpdateOne) SetRedisProfileID(v string) *BackupJobUpdateOne { + _u.mutation.SetRedisProfileID(v) + return _u +} + +// SetNillableRedisProfileID sets the "redis_profile_id" field if the given value is not nil. +func (_u *BackupJobUpdateOne) SetNillableRedisProfileID(v *string) *BackupJobUpdateOne { + if v != nil { + _u.SetRedisProfileID(*v) + } + return _u +} + +// ClearRedisProfileID clears the value of the "redis_profile_id" field. +func (_u *BackupJobUpdateOne) ClearRedisProfileID() *BackupJobUpdateOne { + _u.mutation.ClearRedisProfileID() + return _u +} + // SetStartedAt sets the "started_at" field. 
func (_u *BackupJobUpdateOne) SetStartedAt(v time.Time) *BackupJobUpdateOne { _u.mutation.SetStartedAt(v) @@ -1000,6 +1138,24 @@ func (_u *BackupJobUpdateOne) sqlSave(ctx context.Context) (_node *BackupJob, er if value, ok := _u.mutation.UploadToS3(); ok { _spec.SetField(backupjob.FieldUploadToS3, field.TypeBool, value) } + if value, ok := _u.mutation.S3ProfileID(); ok { + _spec.SetField(backupjob.FieldS3ProfileID, field.TypeString, value) + } + if _u.mutation.S3ProfileIDCleared() { + _spec.ClearField(backupjob.FieldS3ProfileID, field.TypeString) + } + if value, ok := _u.mutation.PostgresProfileID(); ok { + _spec.SetField(backupjob.FieldPostgresProfileID, field.TypeString, value) + } + if _u.mutation.PostgresProfileIDCleared() { + _spec.ClearField(backupjob.FieldPostgresProfileID, field.TypeString) + } + if value, ok := _u.mutation.RedisProfileID(); ok { + _spec.SetField(backupjob.FieldRedisProfileID, field.TypeString, value) + } + if _u.mutation.RedisProfileIDCleared() { + _spec.ClearField(backupjob.FieldRedisProfileID, field.TypeString) + } if value, ok := _u.mutation.StartedAt(); ok { _spec.SetField(backupjob.FieldStartedAt, field.TypeTime, value) } diff --git a/backup/ent/backups3config.go b/backup/ent/backups3config.go index 2678d3b34..c9bfaaa3a 100644 --- a/backup/ent/backups3config.go +++ b/backup/ent/backups3config.go @@ -17,6 +17,12 @@ type BackupS3Config struct { config `json:"-"` // ID of the ent. ID int `json:"id,omitempty"` + // ProfileID holds the value of the "profile_id" field. + ProfileID string `json:"profile_id,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // IsActive holds the value of the "is_active" field. + IsActive bool `json:"is_active,omitempty"` // Enabled holds the value of the "enabled" field. Enabled bool `json:"enabled,omitempty"` // Endpoint holds the value of the "endpoint" field. 
@@ -47,11 +53,11 @@ func (*BackupS3Config) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { - case backups3config.FieldEnabled, backups3config.FieldForcePathStyle, backups3config.FieldUseSsl: + case backups3config.FieldIsActive, backups3config.FieldEnabled, backups3config.FieldForcePathStyle, backups3config.FieldUseSsl: values[i] = new(sql.NullBool) case backups3config.FieldID: values[i] = new(sql.NullInt64) - case backups3config.FieldEndpoint, backups3config.FieldRegion, backups3config.FieldBucket, backups3config.FieldAccessKeyID, backups3config.FieldSecretAccessKeyEncrypted, backups3config.FieldPrefix: + case backups3config.FieldProfileID, backups3config.FieldName, backups3config.FieldEndpoint, backups3config.FieldRegion, backups3config.FieldBucket, backups3config.FieldAccessKeyID, backups3config.FieldSecretAccessKeyEncrypted, backups3config.FieldPrefix: values[i] = new(sql.NullString) case backups3config.FieldCreatedAt, backups3config.FieldUpdatedAt: values[i] = new(sql.NullTime) @@ -76,6 +82,24 @@ func (_m *BackupS3Config) assignValues(columns []string, values []any) error { return fmt.Errorf("unexpected type %T for field id", value) } _m.ID = int(value.Int64) + case backups3config.FieldProfileID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field profile_id", values[i]) + } else if value.Valid { + _m.ProfileID = value.String + } + case backups3config.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + _m.Name = value.String + } + case backups3config.FieldIsActive: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field is_active", values[i]) + } else if value.Valid { + _m.IsActive = value.Bool + } case backups3config.FieldEnabled: if value, ok := values[i].(*sql.NullBool); !ok { 
return fmt.Errorf("unexpected type %T for field enabled", values[i]) @@ -178,6 +202,15 @@ func (_m *BackupS3Config) String() string { var builder strings.Builder builder.WriteString("BackupS3Config(") builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("profile_id=") + builder.WriteString(_m.ProfileID) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + builder.WriteString("is_active=") + builder.WriteString(fmt.Sprintf("%v", _m.IsActive)) + builder.WriteString(", ") builder.WriteString("enabled=") builder.WriteString(fmt.Sprintf("%v", _m.Enabled)) builder.WriteString(", ") diff --git a/backup/ent/backups3config/backups3config.go b/backup/ent/backups3config/backups3config.go index b93dbbe8b..1fdef668e 100644 --- a/backup/ent/backups3config/backups3config.go +++ b/backup/ent/backups3config/backups3config.go @@ -13,6 +13,12 @@ const ( Label = "backup_s3config" // FieldID holds the string denoting the id field in the database. FieldID = "id" + // FieldProfileID holds the string denoting the profile_id field in the database. + FieldProfileID = "profile_id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldIsActive holds the string denoting the is_active field in the database. + FieldIsActive = "is_active" // FieldEnabled holds the string denoting the enabled field in the database. FieldEnabled = "enabled" // FieldEndpoint holds the string denoting the endpoint field in the database. @@ -42,6 +48,9 @@ const ( // Columns holds all SQL columns for backups3config fields. var Columns = []string{ FieldID, + FieldProfileID, + FieldName, + FieldIsActive, FieldEnabled, FieldEndpoint, FieldRegion, @@ -66,6 +75,12 @@ func ValidColumn(column string) bool { } var ( + // DefaultProfileID holds the default value on creation for the "profile_id" field. 
+ DefaultProfileID string + // DefaultName holds the default value on creation for the "name" field. + DefaultName string + // DefaultIsActive holds the default value on creation for the "is_active" field. + DefaultIsActive bool // DefaultEnabled holds the default value on creation for the "enabled" field. DefaultEnabled bool // DefaultEndpoint holds the default value on creation for the "endpoint" field. @@ -98,6 +113,21 @@ func ByID(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldID, opts...).ToFunc() } +// ByProfileID orders the results by the profile_id field. +func ByProfileID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldProfileID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByIsActive orders the results by the is_active field. +func ByIsActive(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsActive, opts...).ToFunc() +} + // ByEnabled orders the results by the enabled field. func ByEnabled(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldEnabled, opts...).ToFunc() diff --git a/backup/ent/backups3config/where.go b/backup/ent/backups3config/where.go index 8c3cf953a..0673d0f8d 100644 --- a/backup/ent/backups3config/where.go +++ b/backup/ent/backups3config/where.go @@ -54,6 +54,21 @@ func IDLTE(id int) predicate.BackupS3Config { return predicate.BackupS3Config(sql.FieldLTE(FieldID, id)) } +// ProfileID applies equality check predicate on the "profile_id" field. It's identical to ProfileIDEQ. +func ProfileID(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldProfileID, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. 
+func Name(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldName, v)) +} + +// IsActive applies equality check predicate on the "is_active" field. It's identical to IsActiveEQ. +func IsActive(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldIsActive, v)) +} + // Enabled applies equality check predicate on the "enabled" field. It's identical to EnabledEQ. func Enabled(v bool) predicate.BackupS3Config { return predicate.BackupS3Config(sql.FieldEQ(FieldEnabled, v)) @@ -109,6 +124,146 @@ func UpdatedAt(v time.Time) predicate.BackupS3Config { return predicate.BackupS3Config(sql.FieldEQ(FieldUpdatedAt, v)) } +// ProfileIDEQ applies the EQ predicate on the "profile_id" field. +func ProfileIDEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldProfileID, v)) +} + +// ProfileIDNEQ applies the NEQ predicate on the "profile_id" field. +func ProfileIDNEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldProfileID, v)) +} + +// ProfileIDIn applies the In predicate on the "profile_id" field. +func ProfileIDIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldProfileID, vs...)) +} + +// ProfileIDNotIn applies the NotIn predicate on the "profile_id" field. +func ProfileIDNotIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldProfileID, vs...)) +} + +// ProfileIDGT applies the GT predicate on the "profile_id" field. +func ProfileIDGT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldProfileID, v)) +} + +// ProfileIDGTE applies the GTE predicate on the "profile_id" field. +func ProfileIDGTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldProfileID, v)) +} + +// ProfileIDLT applies the LT predicate on the "profile_id" field. 
+func ProfileIDLT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldProfileID, v)) +} + +// ProfileIDLTE applies the LTE predicate on the "profile_id" field. +func ProfileIDLTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldProfileID, v)) +} + +// ProfileIDContains applies the Contains predicate on the "profile_id" field. +func ProfileIDContains(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContains(FieldProfileID, v)) +} + +// ProfileIDHasPrefix applies the HasPrefix predicate on the "profile_id" field. +func ProfileIDHasPrefix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasPrefix(FieldProfileID, v)) +} + +// ProfileIDHasSuffix applies the HasSuffix predicate on the "profile_id" field. +func ProfileIDHasSuffix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasSuffix(FieldProfileID, v)) +} + +// ProfileIDEqualFold applies the EqualFold predicate on the "profile_id" field. +func ProfileIDEqualFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEqualFold(FieldProfileID, v)) +} + +// ProfileIDContainsFold applies the ContainsFold predicate on the "profile_id" field. +func ProfileIDContainsFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContainsFold(FieldProfileID, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. 
+func NameIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. 
+func NameContainsFold(v string) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldContainsFold(FieldName, v)) +} + +// IsActiveEQ applies the EQ predicate on the "is_active" field. +func IsActiveEQ(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldEQ(FieldIsActive, v)) +} + +// IsActiveNEQ applies the NEQ predicate on the "is_active" field. +func IsActiveNEQ(v bool) predicate.BackupS3Config { + return predicate.BackupS3Config(sql.FieldNEQ(FieldIsActive, v)) +} + // EnabledEQ applies the EQ predicate on the "enabled" field. func EnabledEQ(v bool) predicate.BackupS3Config { return predicate.BackupS3Config(sql.FieldEQ(FieldEnabled, v)) diff --git a/backup/ent/backups3config_create.go b/backup/ent/backups3config_create.go index 268593efe..d4421b7a7 100644 --- a/backup/ent/backups3config_create.go +++ b/backup/ent/backups3config_create.go @@ -20,6 +20,48 @@ type BackupS3ConfigCreate struct { hooks []Hook } +// SetProfileID sets the "profile_id" field. +func (_c *BackupS3ConfigCreate) SetProfileID(v string) *BackupS3ConfigCreate { + _c.mutation.SetProfileID(v) + return _c +} + +// SetNillableProfileID sets the "profile_id" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableProfileID(v *string) *BackupS3ConfigCreate { + if v != nil { + _c.SetProfileID(*v) + } + return _c +} + +// SetName sets the "name" field. +func (_c *BackupS3ConfigCreate) SetName(v string) *BackupS3ConfigCreate { + _c.mutation.SetName(v) + return _c +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_c *BackupS3ConfigCreate) SetNillableName(v *string) *BackupS3ConfigCreate { + if v != nil { + _c.SetName(*v) + } + return _c +} + +// SetIsActive sets the "is_active" field. +func (_c *BackupS3ConfigCreate) SetIsActive(v bool) *BackupS3ConfigCreate { + _c.mutation.SetIsActive(v) + return _c +} + +// SetNillableIsActive sets the "is_active" field if the given value is not nil. 
+func (_c *BackupS3ConfigCreate) SetNillableIsActive(v *bool) *BackupS3ConfigCreate { + if v != nil { + _c.SetIsActive(*v) + } + return _c +} + // SetEnabled sets the "enabled" field. func (_c *BackupS3ConfigCreate) SetEnabled(v bool) *BackupS3ConfigCreate { _c.mutation.SetEnabled(v) @@ -209,6 +251,18 @@ func (_c *BackupS3ConfigCreate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (_c *BackupS3ConfigCreate) defaults() { + if _, ok := _c.mutation.ProfileID(); !ok { + v := backups3config.DefaultProfileID + _c.mutation.SetProfileID(v) + } + if _, ok := _c.mutation.Name(); !ok { + v := backups3config.DefaultName + _c.mutation.SetName(v) + } + if _, ok := _c.mutation.IsActive(); !ok { + v := backups3config.DefaultIsActive + _c.mutation.SetIsActive(v) + } if _, ok := _c.mutation.Enabled(); !ok { v := backups3config.DefaultEnabled _c.mutation.SetEnabled(v) @@ -253,6 +307,15 @@ func (_c *BackupS3ConfigCreate) defaults() { // check runs all checks and user-defined validators on the builder. 
func (_c *BackupS3ConfigCreate) check() error { + if _, ok := _c.mutation.ProfileID(); !ok { + return &ValidationError{Name: "profile_id", err: errors.New(`ent: missing required field "BackupS3Config.profile_id"`)} + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "BackupS3Config.name"`)} + } + if _, ok := _c.mutation.IsActive(); !ok { + return &ValidationError{Name: "is_active", err: errors.New(`ent: missing required field "BackupS3Config.is_active"`)} + } if _, ok := _c.mutation.Enabled(); !ok { return &ValidationError{Name: "enabled", err: errors.New(`ent: missing required field "BackupS3Config.enabled"`)} } @@ -309,6 +372,18 @@ func (_c *BackupS3ConfigCreate) createSpec() (*BackupS3Config, *sqlgraph.CreateS _node = &BackupS3Config{config: _c.config} _spec = sqlgraph.NewCreateSpec(backups3config.Table, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) ) + if value, ok := _c.mutation.ProfileID(); ok { + _spec.SetField(backups3config.FieldProfileID, field.TypeString, value) + _node.ProfileID = value + } + if value, ok := _c.mutation.Name(); ok { + _spec.SetField(backups3config.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := _c.mutation.IsActive(); ok { + _spec.SetField(backups3config.FieldIsActive, field.TypeBool, value) + _node.IsActive = value + } if value, ok := _c.mutation.Enabled(); ok { _spec.SetField(backups3config.FieldEnabled, field.TypeBool, value) _node.Enabled = value diff --git a/backup/ent/backups3config_query.go b/backup/ent/backups3config_query.go index 11bdf7f01..9fdd0f128 100644 --- a/backup/ent/backups3config_query.go +++ b/backup/ent/backups3config_query.go @@ -262,12 +262,12 @@ func (_q *BackupS3ConfigQuery) Clone() *BackupS3ConfigQuery { // Example: // // var v []struct { -// Enabled bool `json:"enabled,omitempty"` +// ProfileID string `json:"profile_id,omitempty"` // Count int `json:"count,omitempty"` // } // // 
client.BackupS3Config.Query(). -// GroupBy(backups3config.FieldEnabled). +// GroupBy(backups3config.FieldProfileID). // Aggregate(ent.Count()). // Scan(ctx, &v) func (_q *BackupS3ConfigQuery) GroupBy(field string, fields ...string) *BackupS3ConfigGroupBy { @@ -285,11 +285,11 @@ func (_q *BackupS3ConfigQuery) GroupBy(field string, fields ...string) *BackupS3 // Example: // // var v []struct { -// Enabled bool `json:"enabled,omitempty"` +// ProfileID string `json:"profile_id,omitempty"` // } // // client.BackupS3Config.Query(). -// Select(backups3config.FieldEnabled). +// Select(backups3config.FieldProfileID). // Scan(ctx, &v) func (_q *BackupS3ConfigQuery) Select(fields ...string) *BackupS3ConfigSelect { _q.ctx.Fields = append(_q.ctx.Fields, fields...) diff --git a/backup/ent/backups3config_update.go b/backup/ent/backups3config_update.go index 14354b9c9..048e93d03 100644 --- a/backup/ent/backups3config_update.go +++ b/backup/ent/backups3config_update.go @@ -28,6 +28,48 @@ func (_u *BackupS3ConfigUpdate) Where(ps ...predicate.BackupS3Config) *BackupS3C return _u } +// SetProfileID sets the "profile_id" field. +func (_u *BackupS3ConfigUpdate) SetProfileID(v string) *BackupS3ConfigUpdate { + _u.mutation.SetProfileID(v) + return _u +} + +// SetNillableProfileID sets the "profile_id" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableProfileID(v *string) *BackupS3ConfigUpdate { + if v != nil { + _u.SetProfileID(*v) + } + return _u +} + +// SetName sets the "name" field. +func (_u *BackupS3ConfigUpdate) SetName(v string) *BackupS3ConfigUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableName(v *string) *BackupS3ConfigUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetIsActive sets the "is_active" field. 
+func (_u *BackupS3ConfigUpdate) SetIsActive(v bool) *BackupS3ConfigUpdate { + _u.mutation.SetIsActive(v) + return _u +} + +// SetNillableIsActive sets the "is_active" field if the given value is not nil. +func (_u *BackupS3ConfigUpdate) SetNillableIsActive(v *bool) *BackupS3ConfigUpdate { + if v != nil { + _u.SetIsActive(*v) + } + return _u +} + // SetEnabled sets the "enabled" field. func (_u *BackupS3ConfigUpdate) SetEnabled(v bool) *BackupS3ConfigUpdate { _u.mutation.SetEnabled(v) @@ -216,6 +258,15 @@ func (_u *BackupS3ConfigUpdate) sqlSave(ctx context.Context) (_node int, err err } } } + if value, ok := _u.mutation.ProfileID(); ok { + _spec.SetField(backups3config.FieldProfileID, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(backups3config.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.IsActive(); ok { + _spec.SetField(backups3config.FieldIsActive, field.TypeBool, value) + } if value, ok := _u.mutation.Enabled(); ok { _spec.SetField(backups3config.FieldEnabled, field.TypeBool, value) } @@ -269,6 +320,48 @@ type BackupS3ConfigUpdateOne struct { mutation *BackupS3ConfigMutation } +// SetProfileID sets the "profile_id" field. +func (_u *BackupS3ConfigUpdateOne) SetProfileID(v string) *BackupS3ConfigUpdateOne { + _u.mutation.SetProfileID(v) + return _u +} + +// SetNillableProfileID sets the "profile_id" field if the given value is not nil. +func (_u *BackupS3ConfigUpdateOne) SetNillableProfileID(v *string) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetProfileID(*v) + } + return _u +} + +// SetName sets the "name" field. +func (_u *BackupS3ConfigUpdateOne) SetName(v string) *BackupS3ConfigUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. 
+func (_u *BackupS3ConfigUpdateOne) SetNillableName(v *string) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetIsActive sets the "is_active" field. +func (_u *BackupS3ConfigUpdateOne) SetIsActive(v bool) *BackupS3ConfigUpdateOne { + _u.mutation.SetIsActive(v) + return _u +} + +// SetNillableIsActive sets the "is_active" field if the given value is not nil. +func (_u *BackupS3ConfigUpdateOne) SetNillableIsActive(v *bool) *BackupS3ConfigUpdateOne { + if v != nil { + _u.SetIsActive(*v) + } + return _u +} + // SetEnabled sets the "enabled" field. func (_u *BackupS3ConfigUpdateOne) SetEnabled(v bool) *BackupS3ConfigUpdateOne { _u.mutation.SetEnabled(v) @@ -487,6 +580,15 @@ func (_u *BackupS3ConfigUpdateOne) sqlSave(ctx context.Context) (_node *BackupS3 } } } + if value, ok := _u.mutation.ProfileID(); ok { + _spec.SetField(backups3config.FieldProfileID, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(backups3config.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.IsActive(); ok { + _spec.SetField(backups3config.FieldIsActive, field.TypeBool, value) + } if value, ok := _u.mutation.Enabled(); ok { _spec.SetField(backups3config.FieldEnabled, field.TypeBool, value) } diff --git a/backup/ent/backupsourceconfig.go b/backup/ent/backupsourceconfig.go index 705f66b7c..697a44aa7 100644 --- a/backup/ent/backupsourceconfig.go +++ b/backup/ent/backupsourceconfig.go @@ -19,6 +19,12 @@ type BackupSourceConfig struct { ID int `json:"id,omitempty"` // SourceType holds the value of the "source_type" field. SourceType backupsourceconfig.SourceType `json:"source_type,omitempty"` + // ProfileID holds the value of the "profile_id" field. + ProfileID string `json:"profile_id,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // IsActive holds the value of the "is_active" field. 
+ IsActive bool `json:"is_active,omitempty"` // Host holds the value of the "host" field. Host string `json:"host,omitempty"` // Port holds the value of the "port" field. @@ -49,9 +55,11 @@ func (*BackupSourceConfig) scanValues(columns []string) ([]any, error) { values := make([]any, len(columns)) for i := range columns { switch columns[i] { + case backupsourceconfig.FieldIsActive: + values[i] = new(sql.NullBool) case backupsourceconfig.FieldID, backupsourceconfig.FieldPort, backupsourceconfig.FieldRedisDb: values[i] = new(sql.NullInt64) - case backupsourceconfig.FieldSourceType, backupsourceconfig.FieldHost, backupsourceconfig.FieldUsername, backupsourceconfig.FieldPasswordEncrypted, backupsourceconfig.FieldDatabase, backupsourceconfig.FieldSslMode, backupsourceconfig.FieldAddr, backupsourceconfig.FieldContainerName: + case backupsourceconfig.FieldSourceType, backupsourceconfig.FieldProfileID, backupsourceconfig.FieldName, backupsourceconfig.FieldHost, backupsourceconfig.FieldUsername, backupsourceconfig.FieldPasswordEncrypted, backupsourceconfig.FieldDatabase, backupsourceconfig.FieldSslMode, backupsourceconfig.FieldAddr, backupsourceconfig.FieldContainerName: values[i] = new(sql.NullString) case backupsourceconfig.FieldCreatedAt, backupsourceconfig.FieldUpdatedAt: values[i] = new(sql.NullTime) @@ -82,6 +90,24 @@ func (_m *BackupSourceConfig) assignValues(columns []string, values []any) error } else if value.Valid { _m.SourceType = backupsourceconfig.SourceType(value.String) } + case backupsourceconfig.FieldProfileID: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field profile_id", values[i]) + } else if value.Valid { + _m.ProfileID = value.String + } + case backupsourceconfig.FieldName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field name", values[i]) + } else if value.Valid { + _m.Name = value.String + } + case backupsourceconfig.FieldIsActive: + if value, 
ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field is_active", values[i]) + } else if value.Valid { + _m.IsActive = value.Bool + } case backupsourceconfig.FieldHost: if value, ok := values[i].(*sql.NullString); !ok { return fmt.Errorf("unexpected type %T for field host", values[i]) @@ -189,6 +215,15 @@ func (_m *BackupSourceConfig) String() string { builder.WriteString("source_type=") builder.WriteString(fmt.Sprintf("%v", _m.SourceType)) builder.WriteString(", ") + builder.WriteString("profile_id=") + builder.WriteString(_m.ProfileID) + builder.WriteString(", ") + builder.WriteString("name=") + builder.WriteString(_m.Name) + builder.WriteString(", ") + builder.WriteString("is_active=") + builder.WriteString(fmt.Sprintf("%v", _m.IsActive)) + builder.WriteString(", ") builder.WriteString("host=") builder.WriteString(_m.Host) builder.WriteString(", ") diff --git a/backup/ent/backupsourceconfig/backupsourceconfig.go b/backup/ent/backupsourceconfig/backupsourceconfig.go index b81a56b67..202ba8549 100644 --- a/backup/ent/backupsourceconfig/backupsourceconfig.go +++ b/backup/ent/backupsourceconfig/backupsourceconfig.go @@ -16,6 +16,12 @@ const ( FieldID = "id" // FieldSourceType holds the string denoting the source_type field in the database. FieldSourceType = "source_type" + // FieldProfileID holds the string denoting the profile_id field in the database. + FieldProfileID = "profile_id" + // FieldName holds the string denoting the name field in the database. + FieldName = "name" + // FieldIsActive holds the string denoting the is_active field in the database. + FieldIsActive = "is_active" // FieldHost holds the string denoting the host field in the database. FieldHost = "host" // FieldPort holds the string denoting the port field in the database. 
@@ -46,6 +52,9 @@ const ( var Columns = []string{ FieldID, FieldSourceType, + FieldProfileID, + FieldName, + FieldIsActive, FieldHost, FieldPort, FieldUsername, @@ -70,6 +79,8 @@ func ValidColumn(column string) bool { } var ( + // DefaultIsActive holds the default value on creation for the "is_active" field. + DefaultIsActive bool // DefaultContainerName holds the default value on creation for the "container_name" field. DefaultContainerName string // DefaultCreatedAt holds the default value on creation for the "created_at" field. @@ -116,6 +127,21 @@ func BySourceType(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldSourceType, opts...).ToFunc() } +// ByProfileID orders the results by the profile_id field. +func ByProfileID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldProfileID, opts...).ToFunc() +} + +// ByName orders the results by the name field. +func ByName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldName, opts...).ToFunc() +} + +// ByIsActive orders the results by the is_active field. +func ByIsActive(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsActive, opts...).ToFunc() +} + // ByHost orders the results by the host field. func ByHost(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldHost, opts...).ToFunc() diff --git a/backup/ent/backupsourceconfig/where.go b/backup/ent/backupsourceconfig/where.go index e749601c9..41eabc740 100644 --- a/backup/ent/backupsourceconfig/where.go +++ b/backup/ent/backupsourceconfig/where.go @@ -54,6 +54,21 @@ func IDLTE(id int) predicate.BackupSourceConfig { return predicate.BackupSourceConfig(sql.FieldLTE(FieldID, id)) } +// ProfileID applies equality check predicate on the "profile_id" field. It's identical to ProfileIDEQ. 
+func ProfileID(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldProfileID, v)) +} + +// Name applies equality check predicate on the "name" field. It's identical to NameEQ. +func Name(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldName, v)) +} + +// IsActive applies equality check predicate on the "is_active" field. It's identical to IsActiveEQ. +func IsActive(v bool) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldIsActive, v)) +} + // Host applies equality check predicate on the "host" field. It's identical to HostEQ. func Host(v string) predicate.BackupSourceConfig { return predicate.BackupSourceConfig(sql.FieldEQ(FieldHost, v)) @@ -129,6 +144,146 @@ func SourceTypeNotIn(vs ...SourceType) predicate.BackupSourceConfig { return predicate.BackupSourceConfig(sql.FieldNotIn(FieldSourceType, vs...)) } +// ProfileIDEQ applies the EQ predicate on the "profile_id" field. +func ProfileIDEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldProfileID, v)) +} + +// ProfileIDNEQ applies the NEQ predicate on the "profile_id" field. +func ProfileIDNEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldProfileID, v)) +} + +// ProfileIDIn applies the In predicate on the "profile_id" field. +func ProfileIDIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldProfileID, vs...)) +} + +// ProfileIDNotIn applies the NotIn predicate on the "profile_id" field. +func ProfileIDNotIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldProfileID, vs...)) +} + +// ProfileIDGT applies the GT predicate on the "profile_id" field. 
+func ProfileIDGT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldProfileID, v)) +} + +// ProfileIDGTE applies the GTE predicate on the "profile_id" field. +func ProfileIDGTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldProfileID, v)) +} + +// ProfileIDLT applies the LT predicate on the "profile_id" field. +func ProfileIDLT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldProfileID, v)) +} + +// ProfileIDLTE applies the LTE predicate on the "profile_id" field. +func ProfileIDLTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldProfileID, v)) +} + +// ProfileIDContains applies the Contains predicate on the "profile_id" field. +func ProfileIDContains(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContains(FieldProfileID, v)) +} + +// ProfileIDHasPrefix applies the HasPrefix predicate on the "profile_id" field. +func ProfileIDHasPrefix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldProfileID, v)) +} + +// ProfileIDHasSuffix applies the HasSuffix predicate on the "profile_id" field. +func ProfileIDHasSuffix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldProfileID, v)) +} + +// ProfileIDEqualFold applies the EqualFold predicate on the "profile_id" field. +func ProfileIDEqualFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldProfileID, v)) +} + +// ProfileIDContainsFold applies the ContainsFold predicate on the "profile_id" field. +func ProfileIDContainsFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldProfileID, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. 
+func NameEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. +func NameLTE(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. 
+func NameHasSuffix(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldName, v)) +} + +// IsActiveEQ applies the EQ predicate on the "is_active" field. +func IsActiveEQ(v bool) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldEQ(FieldIsActive, v)) +} + +// IsActiveNEQ applies the NEQ predicate on the "is_active" field. +func IsActiveNEQ(v bool) predicate.BackupSourceConfig { + return predicate.BackupSourceConfig(sql.FieldNEQ(FieldIsActive, v)) +} + // HostEQ applies the EQ predicate on the "host" field. func HostEQ(v string) predicate.BackupSourceConfig { return predicate.BackupSourceConfig(sql.FieldEQ(FieldHost, v)) diff --git a/backup/ent/backupsourceconfig_create.go b/backup/ent/backupsourceconfig_create.go index a514f5ce3..ab2fae9fc 100644 --- a/backup/ent/backupsourceconfig_create.go +++ b/backup/ent/backupsourceconfig_create.go @@ -26,6 +26,32 @@ func (_c *BackupSourceConfigCreate) SetSourceType(v backupsourceconfig.SourceTyp return _c } +// SetProfileID sets the "profile_id" field. +func (_c *BackupSourceConfigCreate) SetProfileID(v string) *BackupSourceConfigCreate { + _c.mutation.SetProfileID(v) + return _c +} + +// SetName sets the "name" field. +func (_c *BackupSourceConfigCreate) SetName(v string) *BackupSourceConfigCreate { + _c.mutation.SetName(v) + return _c +} + +// SetIsActive sets the "is_active" field. 
+func (_c *BackupSourceConfigCreate) SetIsActive(v bool) *BackupSourceConfigCreate { + _c.mutation.SetIsActive(v) + return _c +} + +// SetNillableIsActive sets the "is_active" field if the given value is not nil. +func (_c *BackupSourceConfigCreate) SetNillableIsActive(v *bool) *BackupSourceConfigCreate { + if v != nil { + _c.SetIsActive(*v) + } + return _c +} + // SetHost sets the "host" field. func (_c *BackupSourceConfigCreate) SetHost(v string) *BackupSourceConfigCreate { _c.mutation.SetHost(v) @@ -215,6 +241,10 @@ func (_c *BackupSourceConfigCreate) ExecX(ctx context.Context) { // defaults sets the default values of the builder before save. func (_c *BackupSourceConfigCreate) defaults() { + if _, ok := _c.mutation.IsActive(); !ok { + v := backupsourceconfig.DefaultIsActive + _c.mutation.SetIsActive(v) + } if _, ok := _c.mutation.ContainerName(); !ok { v := backupsourceconfig.DefaultContainerName _c.mutation.SetContainerName(v) @@ -239,6 +269,15 @@ func (_c *BackupSourceConfigCreate) check() error { return &ValidationError{Name: "source_type", err: fmt.Errorf(`ent: validator failed for field "BackupSourceConfig.source_type": %w`, err)} } } + if _, ok := _c.mutation.ProfileID(); !ok { + return &ValidationError{Name: "profile_id", err: errors.New(`ent: missing required field "BackupSourceConfig.profile_id"`)} + } + if _, ok := _c.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "BackupSourceConfig.name"`)} + } + if _, ok := _c.mutation.IsActive(); !ok { + return &ValidationError{Name: "is_active", err: errors.New(`ent: missing required field "BackupSourceConfig.is_active"`)} + } if _, ok := _c.mutation.ContainerName(); !ok { return &ValidationError{Name: "container_name", err: errors.New(`ent: missing required field "BackupSourceConfig.container_name"`)} } @@ -278,6 +317,18 @@ func (_c *BackupSourceConfigCreate) createSpec() (*BackupSourceConfig, *sqlgraph 
_spec.SetField(backupsourceconfig.FieldSourceType, field.TypeEnum, value) _node.SourceType = value } + if value, ok := _c.mutation.ProfileID(); ok { + _spec.SetField(backupsourceconfig.FieldProfileID, field.TypeString, value) + _node.ProfileID = value + } + if value, ok := _c.mutation.Name(); ok { + _spec.SetField(backupsourceconfig.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := _c.mutation.IsActive(); ok { + _spec.SetField(backupsourceconfig.FieldIsActive, field.TypeBool, value) + _node.IsActive = value + } if value, ok := _c.mutation.Host(); ok { _spec.SetField(backupsourceconfig.FieldHost, field.TypeString, value) _node.Host = value diff --git a/backup/ent/backupsourceconfig_update.go b/backup/ent/backupsourceconfig_update.go index d841b02d5..4424cc485 100644 --- a/backup/ent/backupsourceconfig_update.go +++ b/backup/ent/backupsourceconfig_update.go @@ -42,6 +42,48 @@ func (_u *BackupSourceConfigUpdate) SetNillableSourceType(v *backupsourceconfig. return _u } +// SetProfileID sets the "profile_id" field. +func (_u *BackupSourceConfigUpdate) SetProfileID(v string) *BackupSourceConfigUpdate { + _u.mutation.SetProfileID(v) + return _u +} + +// SetNillableProfileID sets the "profile_id" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillableProfileID(v *string) *BackupSourceConfigUpdate { + if v != nil { + _u.SetProfileID(*v) + } + return _u +} + +// SetName sets the "name" field. +func (_u *BackupSourceConfigUpdate) SetName(v string) *BackupSourceConfigUpdate { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillableName(v *string) *BackupSourceConfigUpdate { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetIsActive sets the "is_active" field. 
+func (_u *BackupSourceConfigUpdate) SetIsActive(v bool) *BackupSourceConfigUpdate { + _u.mutation.SetIsActive(v) + return _u +} + +// SetNillableIsActive sets the "is_active" field if the given value is not nil. +func (_u *BackupSourceConfigUpdate) SetNillableIsActive(v *bool) *BackupSourceConfigUpdate { + if v != nil { + _u.SetIsActive(*v) + } + return _u +} + // SetHost sets the "host" field. func (_u *BackupSourceConfigUpdate) SetHost(v string) *BackupSourceConfigUpdate { _u.mutation.SetHost(v) @@ -302,6 +344,15 @@ func (_u *BackupSourceConfigUpdate) sqlSave(ctx context.Context) (_node int, err if value, ok := _u.mutation.SourceType(); ok { _spec.SetField(backupsourceconfig.FieldSourceType, field.TypeEnum, value) } + if value, ok := _u.mutation.ProfileID(); ok { + _spec.SetField(backupsourceconfig.FieldProfileID, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(backupsourceconfig.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.IsActive(); ok { + _spec.SetField(backupsourceconfig.FieldIsActive, field.TypeBool, value) + } if value, ok := _u.mutation.Host(); ok { _spec.SetField(backupsourceconfig.FieldHost, field.TypeString, value) } @@ -396,6 +447,48 @@ func (_u *BackupSourceConfigUpdateOne) SetNillableSourceType(v *backupsourceconf return _u } +// SetProfileID sets the "profile_id" field. +func (_u *BackupSourceConfigUpdateOne) SetProfileID(v string) *BackupSourceConfigUpdateOne { + _u.mutation.SetProfileID(v) + return _u +} + +// SetNillableProfileID sets the "profile_id" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillableProfileID(v *string) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetProfileID(*v) + } + return _u +} + +// SetName sets the "name" field. 
+func (_u *BackupSourceConfigUpdateOne) SetName(v string) *BackupSourceConfigUpdateOne { + _u.mutation.SetName(v) + return _u +} + +// SetNillableName sets the "name" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillableName(v *string) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetName(*v) + } + return _u +} + +// SetIsActive sets the "is_active" field. +func (_u *BackupSourceConfigUpdateOne) SetIsActive(v bool) *BackupSourceConfigUpdateOne { + _u.mutation.SetIsActive(v) + return _u +} + +// SetNillableIsActive sets the "is_active" field if the given value is not nil. +func (_u *BackupSourceConfigUpdateOne) SetNillableIsActive(v *bool) *BackupSourceConfigUpdateOne { + if v != nil { + _u.SetIsActive(*v) + } + return _u +} + // SetHost sets the "host" field. func (_u *BackupSourceConfigUpdateOne) SetHost(v string) *BackupSourceConfigUpdateOne { _u.mutation.SetHost(v) @@ -686,6 +779,15 @@ func (_u *BackupSourceConfigUpdateOne) sqlSave(ctx context.Context) (_node *Back if value, ok := _u.mutation.SourceType(); ok { _spec.SetField(backupsourceconfig.FieldSourceType, field.TypeEnum, value) } + if value, ok := _u.mutation.ProfileID(); ok { + _spec.SetField(backupsourceconfig.FieldProfileID, field.TypeString, value) + } + if value, ok := _u.mutation.Name(); ok { + _spec.SetField(backupsourceconfig.FieldName, field.TypeString, value) + } + if value, ok := _u.mutation.IsActive(); ok { + _spec.SetField(backupsourceconfig.FieldIsActive, field.TypeBool, value) + } if value, ok := _u.mutation.Host(); ok { _spec.SetField(backupsourceconfig.FieldHost, field.TypeString, value) } diff --git a/backup/ent/migrate/schema.go b/backup/ent/migrate/schema.go index 78af4965e..5d55772db 100644 --- a/backup/ent/migrate/schema.go +++ b/backup/ent/migrate/schema.go @@ -17,6 +17,9 @@ var ( {Name: "triggered_by", Type: field.TypeString, Default: "system"}, {Name: "idempotency_key", Type: field.TypeString, Nullable: true}, {Name: "upload_to_s3", Type: 
field.TypeBool, Default: false}, + {Name: "s3_profile_id", Type: field.TypeString, Nullable: true}, + {Name: "postgres_profile_id", Type: field.TypeString, Nullable: true}, + {Name: "redis_profile_id", Type: field.TypeString, Nullable: true}, {Name: "started_at", Type: field.TypeTime, Nullable: true}, {Name: "finished_at", Type: field.TypeTime, Nullable: true}, {Name: "error_message", Type: field.TypeString, Nullable: true}, @@ -38,18 +41,33 @@ var ( { Name: "backupjob_status_created_at", Unique: false, - Columns: []*schema.Column{BackupJobsColumns[3], BackupJobsColumns[16]}, + Columns: []*schema.Column{BackupJobsColumns[3], BackupJobsColumns[19]}, }, { Name: "backupjob_backup_type_created_at", Unique: false, - Columns: []*schema.Column{BackupJobsColumns[2], BackupJobsColumns[16]}, + Columns: []*schema.Column{BackupJobsColumns[2], BackupJobsColumns[19]}, }, { Name: "backupjob_idempotency_key", Unique: false, Columns: []*schema.Column{BackupJobsColumns[5]}, }, + { + Name: "backupjob_s3_profile_id_status", + Unique: false, + Columns: []*schema.Column{BackupJobsColumns[7], BackupJobsColumns[3]}, + }, + { + Name: "backupjob_postgres_profile_id_status", + Unique: false, + Columns: []*schema.Column{BackupJobsColumns[8], BackupJobsColumns[3]}, + }, + { + Name: "backupjob_redis_profile_id_status", + Unique: false, + Columns: []*schema.Column{BackupJobsColumns[9], BackupJobsColumns[3]}, + }, }, } // BackupJobEventsColumns holds the columns for the "backup_job_events" table. @@ -87,6 +105,9 @@ var ( // BackupS3configsColumns holds the columns for the "backup_s3configs" table. 
BackupS3configsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "profile_id", Type: field.TypeString, Default: "default"}, + {Name: "name", Type: field.TypeString, Default: "默认账号"}, + {Name: "is_active", Type: field.TypeBool, Default: false}, {Name: "enabled", Type: field.TypeBool, Default: false}, {Name: "endpoint", Type: field.TypeString, Default: ""}, {Name: "region", Type: field.TypeString, Default: ""}, @@ -104,6 +125,18 @@ var ( Name: "backup_s3configs", Columns: BackupS3configsColumns, PrimaryKey: []*schema.Column{BackupS3configsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "backups3config_profile_id", + Unique: true, + Columns: []*schema.Column{BackupS3configsColumns[1]}, + }, + { + Name: "backups3config_is_active", + Unique: false, + Columns: []*schema.Column{BackupS3configsColumns[3]}, + }, + }, } // BackupSettingsColumns holds the columns for the "backup_settings" table. BackupSettingsColumns = []*schema.Column{ @@ -126,6 +159,9 @@ var ( BackupSourceConfigsColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt, Increment: true}, {Name: "source_type", Type: field.TypeEnum, Enums: []string{"postgres", "redis"}}, + {Name: "profile_id", Type: field.TypeString}, + {Name: "name", Type: field.TypeString}, + {Name: "is_active", Type: field.TypeBool, Default: false}, {Name: "host", Type: field.TypeString, Nullable: true}, {Name: "port", Type: field.TypeInt, Nullable: true}, {Name: "username", Type: field.TypeString, Nullable: true}, @@ -145,9 +181,14 @@ var ( PrimaryKey: []*schema.Column{BackupSourceConfigsColumns[0]}, Indexes: []*schema.Index{ { - Name: "backupsourceconfig_source_type", + Name: "backupsourceconfig_source_type_profile_id", Unique: true, - Columns: []*schema.Column{BackupSourceConfigsColumns[1]}, + Columns: []*schema.Column{BackupSourceConfigsColumns[1], BackupSourceConfigsColumns[2]}, + }, + { + Name: "backupsourceconfig_source_type_is_active", + Unique: false, + Columns: 
[]*schema.Column{BackupSourceConfigsColumns[1], BackupSourceConfigsColumns[4]}, }, }, } diff --git a/backup/ent/mutation.go b/backup/ent/mutation.go index 351ecea0d..e3f1fc0dc 100644 --- a/backup/ent/mutation.go +++ b/backup/ent/mutation.go @@ -47,6 +47,9 @@ type BackupJobMutation struct { triggered_by *string idempotency_key *string upload_to_s3 *bool + s3_profile_id *string + postgres_profile_id *string + redis_profile_id *string started_at *time.Time finished_at *time.Time error_message *string @@ -395,6 +398,153 @@ func (m *BackupJobMutation) ResetUploadToS3() { m.upload_to_s3 = nil } +// SetS3ProfileID sets the "s3_profile_id" field. +func (m *BackupJobMutation) SetS3ProfileID(s string) { + m.s3_profile_id = &s +} + +// S3ProfileID returns the value of the "s3_profile_id" field in the mutation. +func (m *BackupJobMutation) S3ProfileID() (r string, exists bool) { + v := m.s3_profile_id + if v == nil { + return + } + return *v, true +} + +// OldS3ProfileID returns the old "s3_profile_id" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldS3ProfileID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldS3ProfileID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldS3ProfileID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldS3ProfileID: %w", err) + } + return oldValue.S3ProfileID, nil +} + +// ClearS3ProfileID clears the value of the "s3_profile_id" field. 
+func (m *BackupJobMutation) ClearS3ProfileID() { + m.s3_profile_id = nil + m.clearedFields[backupjob.FieldS3ProfileID] = struct{}{} +} + +// S3ProfileIDCleared returns if the "s3_profile_id" field was cleared in this mutation. +func (m *BackupJobMutation) S3ProfileIDCleared() bool { + _, ok := m.clearedFields[backupjob.FieldS3ProfileID] + return ok +} + +// ResetS3ProfileID resets all changes to the "s3_profile_id" field. +func (m *BackupJobMutation) ResetS3ProfileID() { + m.s3_profile_id = nil + delete(m.clearedFields, backupjob.FieldS3ProfileID) +} + +// SetPostgresProfileID sets the "postgres_profile_id" field. +func (m *BackupJobMutation) SetPostgresProfileID(s string) { + m.postgres_profile_id = &s +} + +// PostgresProfileID returns the value of the "postgres_profile_id" field in the mutation. +func (m *BackupJobMutation) PostgresProfileID() (r string, exists bool) { + v := m.postgres_profile_id + if v == nil { + return + } + return *v, true +} + +// OldPostgresProfileID returns the old "postgres_profile_id" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldPostgresProfileID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPostgresProfileID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPostgresProfileID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPostgresProfileID: %w", err) + } + return oldValue.PostgresProfileID, nil +} + +// ClearPostgresProfileID clears the value of the "postgres_profile_id" field. 
+func (m *BackupJobMutation) ClearPostgresProfileID() { + m.postgres_profile_id = nil + m.clearedFields[backupjob.FieldPostgresProfileID] = struct{}{} +} + +// PostgresProfileIDCleared returns if the "postgres_profile_id" field was cleared in this mutation. +func (m *BackupJobMutation) PostgresProfileIDCleared() bool { + _, ok := m.clearedFields[backupjob.FieldPostgresProfileID] + return ok +} + +// ResetPostgresProfileID resets all changes to the "postgres_profile_id" field. +func (m *BackupJobMutation) ResetPostgresProfileID() { + m.postgres_profile_id = nil + delete(m.clearedFields, backupjob.FieldPostgresProfileID) +} + +// SetRedisProfileID sets the "redis_profile_id" field. +func (m *BackupJobMutation) SetRedisProfileID(s string) { + m.redis_profile_id = &s +} + +// RedisProfileID returns the value of the "redis_profile_id" field in the mutation. +func (m *BackupJobMutation) RedisProfileID() (r string, exists bool) { + v := m.redis_profile_id + if v == nil { + return + } + return *v, true +} + +// OldRedisProfileID returns the old "redis_profile_id" field's value of the BackupJob entity. +// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupJobMutation) OldRedisProfileID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRedisProfileID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRedisProfileID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRedisProfileID: %w", err) + } + return oldValue.RedisProfileID, nil +} + +// ClearRedisProfileID clears the value of the "redis_profile_id" field. 
+func (m *BackupJobMutation) ClearRedisProfileID() { + m.redis_profile_id = nil + m.clearedFields[backupjob.FieldRedisProfileID] = struct{}{} +} + +// RedisProfileIDCleared returns if the "redis_profile_id" field was cleared in this mutation. +func (m *BackupJobMutation) RedisProfileIDCleared() bool { + _, ok := m.clearedFields[backupjob.FieldRedisProfileID] + return ok +} + +// ResetRedisProfileID resets all changes to the "redis_profile_id" field. +func (m *BackupJobMutation) ResetRedisProfileID() { + m.redis_profile_id = nil + delete(m.clearedFields, backupjob.FieldRedisProfileID) +} + // SetStartedAt sets the "started_at" field. func (m *BackupJobMutation) SetStartedAt(t time.Time) { m.started_at = &t @@ -1017,7 +1167,7 @@ func (m *BackupJobMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). func (m *BackupJobMutation) Fields() []string { - fields := make([]string, 0, 17) + fields := make([]string, 0, 20) if m.job_id != nil { fields = append(fields, backupjob.FieldJobID) } @@ -1036,6 +1186,15 @@ func (m *BackupJobMutation) Fields() []string { if m.upload_to_s3 != nil { fields = append(fields, backupjob.FieldUploadToS3) } + if m.s3_profile_id != nil { + fields = append(fields, backupjob.FieldS3ProfileID) + } + if m.postgres_profile_id != nil { + fields = append(fields, backupjob.FieldPostgresProfileID) + } + if m.redis_profile_id != nil { + fields = append(fields, backupjob.FieldRedisProfileID) + } if m.started_at != nil { fields = append(fields, backupjob.FieldStartedAt) } @@ -1089,6 +1248,12 @@ func (m *BackupJobMutation) Field(name string) (ent.Value, bool) { return m.IdempotencyKey() case backupjob.FieldUploadToS3: return m.UploadToS3() + case backupjob.FieldS3ProfileID: + return m.S3ProfileID() + case backupjob.FieldPostgresProfileID: + return m.PostgresProfileID() + case backupjob.FieldRedisProfileID: + return m.RedisProfileID() case backupjob.FieldStartedAt: return m.StartedAt() case 
backupjob.FieldFinishedAt: @@ -1132,6 +1297,12 @@ func (m *BackupJobMutation) OldField(ctx context.Context, name string) (ent.Valu return m.OldIdempotencyKey(ctx) case backupjob.FieldUploadToS3: return m.OldUploadToS3(ctx) + case backupjob.FieldS3ProfileID: + return m.OldS3ProfileID(ctx) + case backupjob.FieldPostgresProfileID: + return m.OldPostgresProfileID(ctx) + case backupjob.FieldRedisProfileID: + return m.OldRedisProfileID(ctx) case backupjob.FieldStartedAt: return m.OldStartedAt(ctx) case backupjob.FieldFinishedAt: @@ -1205,6 +1376,27 @@ func (m *BackupJobMutation) SetField(name string, value ent.Value) error { } m.SetUploadToS3(v) return nil + case backupjob.FieldS3ProfileID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetS3ProfileID(v) + return nil + case backupjob.FieldPostgresProfileID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPostgresProfileID(v) + return nil + case backupjob.FieldRedisProfileID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRedisProfileID(v) + return nil case backupjob.FieldStartedAt: v, ok := value.(time.Time) if !ok { @@ -1330,6 +1522,15 @@ func (m *BackupJobMutation) ClearedFields() []string { if m.FieldCleared(backupjob.FieldIdempotencyKey) { fields = append(fields, backupjob.FieldIdempotencyKey) } + if m.FieldCleared(backupjob.FieldS3ProfileID) { + fields = append(fields, backupjob.FieldS3ProfileID) + } + if m.FieldCleared(backupjob.FieldPostgresProfileID) { + fields = append(fields, backupjob.FieldPostgresProfileID) + } + if m.FieldCleared(backupjob.FieldRedisProfileID) { + fields = append(fields, backupjob.FieldRedisProfileID) + } if m.FieldCleared(backupjob.FieldStartedAt) { fields = append(fields, backupjob.FieldStartedAt) } @@ -1374,6 +1575,15 @@ func (m *BackupJobMutation) ClearField(name string) error { case 
backupjob.FieldIdempotencyKey: m.ClearIdempotencyKey() return nil + case backupjob.FieldS3ProfileID: + m.ClearS3ProfileID() + return nil + case backupjob.FieldPostgresProfileID: + m.ClearPostgresProfileID() + return nil + case backupjob.FieldRedisProfileID: + m.ClearRedisProfileID() + return nil case backupjob.FieldStartedAt: m.ClearStartedAt() return nil @@ -1427,6 +1637,15 @@ func (m *BackupJobMutation) ResetField(name string) error { case backupjob.FieldUploadToS3: m.ResetUploadToS3() return nil + case backupjob.FieldS3ProfileID: + m.ResetS3ProfileID() + return nil + case backupjob.FieldPostgresProfileID: + m.ResetPostgresProfileID() + return nil + case backupjob.FieldRedisProfileID: + m.ResetRedisProfileID() + return nil case backupjob.FieldStartedAt: m.ResetStartedAt() return nil @@ -2296,6 +2515,9 @@ type BackupS3ConfigMutation struct { op Op typ string id *int + profile_id *string + name *string + is_active *bool enabled *bool endpoint *string region *string @@ -2411,6 +2633,114 @@ func (m *BackupS3ConfigMutation) IDs(ctx context.Context) ([]int, error) { } } +// SetProfileID sets the "profile_id" field. +func (m *BackupS3ConfigMutation) SetProfileID(s string) { + m.profile_id = &s +} + +// ProfileID returns the value of the "profile_id" field in the mutation. +func (m *BackupS3ConfigMutation) ProfileID() (r string, exists bool) { + v := m.profile_id + if v == nil { + return + } + return *v, true +} + +// OldProfileID returns the old "profile_id" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *BackupS3ConfigMutation) OldProfileID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldProfileID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldProfileID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldProfileID: %w", err) + } + return oldValue.ProfileID, nil +} + +// ResetProfileID resets all changes to the "profile_id" field. +func (m *BackupS3ConfigMutation) ResetProfileID() { + m.profile_id = nil +} + +// SetName sets the "name" field. +func (m *BackupS3ConfigMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *BackupS3ConfigMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *BackupS3ConfigMutation) ResetName() { + m.name = nil +} + +// SetIsActive sets the "is_active" field. 
+func (m *BackupS3ConfigMutation) SetIsActive(b bool) { + m.is_active = &b +} + +// IsActive returns the value of the "is_active" field in the mutation. +func (m *BackupS3ConfigMutation) IsActive() (r bool, exists bool) { + v := m.is_active + if v == nil { + return + } + return *v, true +} + +// OldIsActive returns the old "is_active" field's value of the BackupS3Config entity. +// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupS3ConfigMutation) OldIsActive(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsActive is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsActive requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsActive: %w", err) + } + return oldValue.IsActive, nil +} + +// ResetIsActive resets all changes to the "is_active" field. +func (m *BackupS3ConfigMutation) ResetIsActive() { + m.is_active = nil +} + // SetEnabled sets the "enabled" field. func (m *BackupS3ConfigMutation) SetEnabled(b bool) { m.enabled = &b @@ -2854,7 +3184,16 @@ func (m *BackupS3ConfigMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *BackupS3ConfigMutation) Fields() []string { - fields := make([]string, 0, 11) + fields := make([]string, 0, 14) + if m.profile_id != nil { + fields = append(fields, backups3config.FieldProfileID) + } + if m.name != nil { + fields = append(fields, backups3config.FieldName) + } + if m.is_active != nil { + fields = append(fields, backups3config.FieldIsActive) + } if m.enabled != nil { fields = append(fields, backups3config.FieldEnabled) } @@ -2896,6 +3235,12 @@ func (m *BackupS3ConfigMutation) Fields() []string { // schema. func (m *BackupS3ConfigMutation) Field(name string) (ent.Value, bool) { switch name { + case backups3config.FieldProfileID: + return m.ProfileID() + case backups3config.FieldName: + return m.Name() + case backups3config.FieldIsActive: + return m.IsActive() case backups3config.FieldEnabled: return m.Enabled() case backups3config.FieldEndpoint: @@ -2927,6 +3272,12 @@ func (m *BackupS3ConfigMutation) Field(name string) (ent.Value, bool) { // database failed. func (m *BackupS3ConfigMutation) OldField(ctx context.Context, name string) (ent.Value, error) { switch name { + case backups3config.FieldProfileID: + return m.OldProfileID(ctx) + case backups3config.FieldName: + return m.OldName(ctx) + case backups3config.FieldIsActive: + return m.OldIsActive(ctx) case backups3config.FieldEnabled: return m.OldEnabled(ctx) case backups3config.FieldEndpoint: @@ -2958,6 +3309,27 @@ func (m *BackupS3ConfigMutation) OldField(ctx context.Context, name string) (ent // type. 
func (m *BackupS3ConfigMutation) SetField(name string, value ent.Value) error { switch name { + case backups3config.FieldProfileID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetProfileID(v) + return nil + case backups3config.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case backups3config.FieldIsActive: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsActive(v) + return nil case backups3config.FieldEnabled: v, ok := value.(bool) if !ok { @@ -3093,6 +3465,15 @@ func (m *BackupS3ConfigMutation) ClearField(name string) error { // It returns an error if the field is not defined in the schema. func (m *BackupS3ConfigMutation) ResetField(name string) error { switch name { + case backups3config.FieldProfileID: + m.ResetProfileID() + return nil + case backups3config.FieldName: + m.ResetName() + return nil + case backups3config.FieldIsActive: + m.ResetIsActive() + return nil case backups3config.FieldEnabled: m.ResetEnabled() return nil @@ -3904,6 +4285,9 @@ type BackupSourceConfigMutation struct { typ string id *int source_type *backupsourceconfig.SourceType + profile_id *string + name *string + is_active *bool host *string port *int addport *int @@ -4057,6 +4441,114 @@ func (m *BackupSourceConfigMutation) ResetSourceType() { m.source_type = nil } +// SetProfileID sets the "profile_id" field. +func (m *BackupSourceConfigMutation) SetProfileID(s string) { + m.profile_id = &s +} + +// ProfileID returns the value of the "profile_id" field in the mutation. +func (m *BackupSourceConfigMutation) ProfileID() (r string, exists bool) { + v := m.profile_id + if v == nil { + return + } + return *v, true +} + +// OldProfileID returns the old "profile_id" field's value of the BackupSourceConfig entity. 
+// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldProfileID(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldProfileID is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldProfileID requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldProfileID: %w", err) + } + return oldValue.ProfileID, nil +} + +// ResetProfileID resets all changes to the "profile_id" field. +func (m *BackupSourceConfigMutation) ResetProfileID() { + m.profile_id = nil +} + +// SetName sets the "name" field. +func (m *BackupSourceConfigMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *BackupSourceConfigMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. 
+func (m *BackupSourceConfigMutation) ResetName() { + m.name = nil +} + +// SetIsActive sets the "is_active" field. +func (m *BackupSourceConfigMutation) SetIsActive(b bool) { + m.is_active = &b +} + +// IsActive returns the value of the "is_active" field in the mutation. +func (m *BackupSourceConfigMutation) IsActive() (r bool, exists bool) { + v := m.is_active + if v == nil { + return + } + return *v, true +} + +// OldIsActive returns the old "is_active" field's value of the BackupSourceConfig entity. +// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *BackupSourceConfigMutation) OldIsActive(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsActive is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsActive requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsActive: %w", err) + } + return oldValue.IsActive, nil +} + +// ResetIsActive resets all changes to the "is_active" field. +func (m *BackupSourceConfigMutation) ResetIsActive() { + m.is_active = nil +} + // SetHost sets the "host" field. func (m *BackupSourceConfigMutation) SetHost(s string) { m.host = &s @@ -4633,10 +5125,19 @@ func (m *BackupSourceConfigMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *BackupSourceConfigMutation) Fields() []string { - fields := make([]string, 0, 12) + fields := make([]string, 0, 15) if m.source_type != nil { fields = append(fields, backupsourceconfig.FieldSourceType) } + if m.profile_id != nil { + fields = append(fields, backupsourceconfig.FieldProfileID) + } + if m.name != nil { + fields = append(fields, backupsourceconfig.FieldName) + } + if m.is_active != nil { + fields = append(fields, backupsourceconfig.FieldIsActive) + } if m.host != nil { fields = append(fields, backupsourceconfig.FieldHost) } @@ -4680,6 +5181,12 @@ func (m *BackupSourceConfigMutation) Field(name string) (ent.Value, bool) { switch name { case backupsourceconfig.FieldSourceType: return m.SourceType() + case backupsourceconfig.FieldProfileID: + return m.ProfileID() + case backupsourceconfig.FieldName: + return m.Name() + case backupsourceconfig.FieldIsActive: + return m.IsActive() case backupsourceconfig.FieldHost: return m.Host() case backupsourceconfig.FieldPort: @@ -4713,6 +5220,12 @@ func (m *BackupSourceConfigMutation) OldField(ctx context.Context, name string) switch name { case backupsourceconfig.FieldSourceType: return m.OldSourceType(ctx) + case backupsourceconfig.FieldProfileID: + return m.OldProfileID(ctx) + case backupsourceconfig.FieldName: + return m.OldName(ctx) + case backupsourceconfig.FieldIsActive: + return m.OldIsActive(ctx) case backupsourceconfig.FieldHost: return m.OldHost(ctx) case backupsourceconfig.FieldPort: @@ -4751,6 +5264,27 @@ func (m *BackupSourceConfigMutation) SetField(name string, value ent.Value) erro } m.SetSourceType(v) return nil + case backupsourceconfig.FieldProfileID: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetProfileID(v) + return nil + case backupsourceconfig.FieldName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetName(v) + return nil + case 
backupsourceconfig.FieldIsActive: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsActive(v) + return nil case backupsourceconfig.FieldHost: v, ok := value.(string) if !ok { @@ -4958,6 +5492,15 @@ func (m *BackupSourceConfigMutation) ResetField(name string) error { case backupsourceconfig.FieldSourceType: m.ResetSourceType() return nil + case backupsourceconfig.FieldProfileID: + m.ResetProfileID() + return nil + case backupsourceconfig.FieldName: + m.ResetName() + return nil + case backupsourceconfig.FieldIsActive: + m.ResetIsActive() + return nil case backupsourceconfig.FieldHost: m.ResetHost() return nil diff --git a/backup/ent/runtime.go b/backup/ent/runtime.go index d46c11e92..1d5ab9a13 100644 --- a/backup/ent/runtime.go +++ b/backup/ent/runtime.go @@ -28,11 +28,11 @@ func init() { // backupjob.DefaultUploadToS3 holds the default value on creation for the upload_to_s3 field. backupjob.DefaultUploadToS3 = backupjobDescUploadToS3.Default.(bool) // backupjobDescCreatedAt is the schema descriptor for created_at field. - backupjobDescCreatedAt := backupjobFields[15].Descriptor() + backupjobDescCreatedAt := backupjobFields[18].Descriptor() // backupjob.DefaultCreatedAt holds the default value on creation for the created_at field. backupjob.DefaultCreatedAt = backupjobDescCreatedAt.Default.(func() time.Time) // backupjobDescUpdatedAt is the schema descriptor for updated_at field. - backupjobDescUpdatedAt := backupjobFields[16].Descriptor() + backupjobDescUpdatedAt := backupjobFields[19].Descriptor() // backupjob.DefaultUpdatedAt holds the default value on creation for the updated_at field. backupjob.DefaultUpdatedAt = backupjobDescUpdatedAt.Default.(func() time.Time) // backupjob.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. 
@@ -53,44 +53,56 @@ func init() { backupjobevent.DefaultCreatedAt = backupjobeventDescCreatedAt.Default.(func() time.Time) backups3configFields := schema.BackupS3Config{}.Fields() _ = backups3configFields + // backups3configDescProfileID is the schema descriptor for profile_id field. + backups3configDescProfileID := backups3configFields[0].Descriptor() + // backups3config.DefaultProfileID holds the default value on creation for the profile_id field. + backups3config.DefaultProfileID = backups3configDescProfileID.Default.(string) + // backups3configDescName is the schema descriptor for name field. + backups3configDescName := backups3configFields[1].Descriptor() + // backups3config.DefaultName holds the default value on creation for the name field. + backups3config.DefaultName = backups3configDescName.Default.(string) + // backups3configDescIsActive is the schema descriptor for is_active field. + backups3configDescIsActive := backups3configFields[2].Descriptor() + // backups3config.DefaultIsActive holds the default value on creation for the is_active field. + backups3config.DefaultIsActive = backups3configDescIsActive.Default.(bool) // backups3configDescEnabled is the schema descriptor for enabled field. - backups3configDescEnabled := backups3configFields[0].Descriptor() + backups3configDescEnabled := backups3configFields[3].Descriptor() // backups3config.DefaultEnabled holds the default value on creation for the enabled field. backups3config.DefaultEnabled = backups3configDescEnabled.Default.(bool) // backups3configDescEndpoint is the schema descriptor for endpoint field. - backups3configDescEndpoint := backups3configFields[1].Descriptor() + backups3configDescEndpoint := backups3configFields[4].Descriptor() // backups3config.DefaultEndpoint holds the default value on creation for the endpoint field. backups3config.DefaultEndpoint = backups3configDescEndpoint.Default.(string) // backups3configDescRegion is the schema descriptor for region field. 
- backups3configDescRegion := backups3configFields[2].Descriptor() + backups3configDescRegion := backups3configFields[5].Descriptor() // backups3config.DefaultRegion holds the default value on creation for the region field. backups3config.DefaultRegion = backups3configDescRegion.Default.(string) // backups3configDescBucket is the schema descriptor for bucket field. - backups3configDescBucket := backups3configFields[3].Descriptor() + backups3configDescBucket := backups3configFields[6].Descriptor() // backups3config.DefaultBucket holds the default value on creation for the bucket field. backups3config.DefaultBucket = backups3configDescBucket.Default.(string) // backups3configDescAccessKeyID is the schema descriptor for access_key_id field. - backups3configDescAccessKeyID := backups3configFields[4].Descriptor() + backups3configDescAccessKeyID := backups3configFields[7].Descriptor() // backups3config.DefaultAccessKeyID holds the default value on creation for the access_key_id field. backups3config.DefaultAccessKeyID = backups3configDescAccessKeyID.Default.(string) // backups3configDescPrefix is the schema descriptor for prefix field. - backups3configDescPrefix := backups3configFields[6].Descriptor() + backups3configDescPrefix := backups3configFields[9].Descriptor() // backups3config.DefaultPrefix holds the default value on creation for the prefix field. backups3config.DefaultPrefix = backups3configDescPrefix.Default.(string) // backups3configDescForcePathStyle is the schema descriptor for force_path_style field. - backups3configDescForcePathStyle := backups3configFields[7].Descriptor() + backups3configDescForcePathStyle := backups3configFields[10].Descriptor() // backups3config.DefaultForcePathStyle holds the default value on creation for the force_path_style field. backups3config.DefaultForcePathStyle = backups3configDescForcePathStyle.Default.(bool) // backups3configDescUseSsl is the schema descriptor for use_ssl field. 
- backups3configDescUseSsl := backups3configFields[8].Descriptor() + backups3configDescUseSsl := backups3configFields[11].Descriptor() // backups3config.DefaultUseSsl holds the default value on creation for the use_ssl field. backups3config.DefaultUseSsl = backups3configDescUseSsl.Default.(bool) // backups3configDescCreatedAt is the schema descriptor for created_at field. - backups3configDescCreatedAt := backups3configFields[9].Descriptor() + backups3configDescCreatedAt := backups3configFields[12].Descriptor() // backups3config.DefaultCreatedAt holds the default value on creation for the created_at field. backups3config.DefaultCreatedAt = backups3configDescCreatedAt.Default.(func() time.Time) // backups3configDescUpdatedAt is the schema descriptor for updated_at field. - backups3configDescUpdatedAt := backups3configFields[10].Descriptor() + backups3configDescUpdatedAt := backups3configFields[13].Descriptor() // backups3config.DefaultUpdatedAt holds the default value on creation for the updated_at field. backups3config.DefaultUpdatedAt = backups3configDescUpdatedAt.Default.(func() time.Time) // backups3config.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. @@ -125,16 +137,20 @@ func init() { backupsetting.UpdateDefaultUpdatedAt = backupsettingDescUpdatedAt.UpdateDefault.(func() time.Time) backupsourceconfigFields := schema.BackupSourceConfig{}.Fields() _ = backupsourceconfigFields + // backupsourceconfigDescIsActive is the schema descriptor for is_active field. + backupsourceconfigDescIsActive := backupsourceconfigFields[3].Descriptor() + // backupsourceconfig.DefaultIsActive holds the default value on creation for the is_active field. + backupsourceconfig.DefaultIsActive = backupsourceconfigDescIsActive.Default.(bool) // backupsourceconfigDescContainerName is the schema descriptor for container_name field. 
- backupsourceconfigDescContainerName := backupsourceconfigFields[9].Descriptor() + backupsourceconfigDescContainerName := backupsourceconfigFields[12].Descriptor() // backupsourceconfig.DefaultContainerName holds the default value on creation for the container_name field. backupsourceconfig.DefaultContainerName = backupsourceconfigDescContainerName.Default.(string) // backupsourceconfigDescCreatedAt is the schema descriptor for created_at field. - backupsourceconfigDescCreatedAt := backupsourceconfigFields[10].Descriptor() + backupsourceconfigDescCreatedAt := backupsourceconfigFields[13].Descriptor() // backupsourceconfig.DefaultCreatedAt holds the default value on creation for the created_at field. backupsourceconfig.DefaultCreatedAt = backupsourceconfigDescCreatedAt.Default.(func() time.Time) // backupsourceconfigDescUpdatedAt is the schema descriptor for updated_at field. - backupsourceconfigDescUpdatedAt := backupsourceconfigFields[11].Descriptor() + backupsourceconfigDescUpdatedAt := backupsourceconfigFields[14].Descriptor() // backupsourceconfig.DefaultUpdatedAt holds the default value on creation for the updated_at field. backupsourceconfig.DefaultUpdatedAt = backupsourceconfigDescUpdatedAt.Default.(func() time.Time) // backupsourceconfig.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. 
diff --git a/backup/ent/schema/backup_job.go b/backup/ent/schema/backup_job.go index b0cbbe8e6..39f023a4a 100644 --- a/backup/ent/schema/backup_job.go +++ b/backup/ent/schema/backup_job.go @@ -21,6 +21,9 @@ func (BackupJob) Fields() []ent.Field { field.String("triggered_by").Default("system"), field.String("idempotency_key").Optional(), field.Bool("upload_to_s3").Default(false), + field.String("s3_profile_id").Optional(), + field.String("postgres_profile_id").Optional(), + field.String("redis_profile_id").Optional(), field.Time("started_at").Optional().Nillable(), field.Time("finished_at").Optional().Nillable(), field.String("error_message").Optional(), @@ -46,5 +49,8 @@ func (BackupJob) Indexes() []ent.Index { index.Fields("status", "created_at"), index.Fields("backup_type", "created_at"), index.Fields("idempotency_key"), + index.Fields("s3_profile_id", "status"), + index.Fields("postgres_profile_id", "status"), + index.Fields("redis_profile_id", "status"), } } diff --git a/backup/ent/schema/backup_s3_config.go b/backup/ent/schema/backup_s3_config.go index 3293f0622..c4b5c1ad0 100644 --- a/backup/ent/schema/backup_s3_config.go +++ b/backup/ent/schema/backup_s3_config.go @@ -5,6 +5,7 @@ import ( "entgo.io/ent" "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" ) type BackupS3Config struct { @@ -13,6 +14,9 @@ type BackupS3Config struct { func (BackupS3Config) Fields() []ent.Field { return []ent.Field{ + field.String("profile_id").Default("default"), + field.String("name").Default("默认账号"), + field.Bool("is_active").Default(false), field.Bool("enabled").Default(false), field.String("endpoint").Default(""), field.String("region").Default(""), @@ -26,3 +30,10 @@ func (BackupS3Config) Fields() []ent.Field { field.Time("updated_at").Default(time.Now).UpdateDefault(time.Now), } } + +func (BackupS3Config) Indexes() []ent.Index { + return []ent.Index{ + index.Fields("profile_id").Unique(), + index.Fields("is_active"), + } +} diff --git 
a/backup/ent/schema/backup_source_config.go b/backup/ent/schema/backup_source_config.go index b17de7a76..f399d4123 100644 --- a/backup/ent/schema/backup_source_config.go +++ b/backup/ent/schema/backup_source_config.go @@ -15,6 +15,9 @@ type BackupSourceConfig struct { func (BackupSourceConfig) Fields() []ent.Field { return []ent.Field{ field.Enum("source_type").Values("postgres", "redis"), + field.String("profile_id"), + field.String("name"), + field.Bool("is_active").Default(false), field.String("host").Optional(), field.Int("port").Optional().Nillable(), field.String("username").Optional(), @@ -31,6 +34,7 @@ func (BackupSourceConfig) Fields() []ent.Field { func (BackupSourceConfig) Indexes() []ent.Index { return []ent.Index{ - index.Fields("source_type").Unique(), + index.Fields("source_type", "profile_id").Unique(), + index.Fields("source_type", "is_active"), } } diff --git a/backup/go.sum b/backup/go.sum index a72f7c295..1b762a27d 100644 --- a/backup/go.sum +++ b/backup/go.sum @@ -50,6 +50,7 @@ github.com/aws/smithy-go v1.24.1 h1:VbyeNfmYkWoxMVpGUAbQumkODcYmfMRfZ8yQiH30SK0= github.com/aws/smithy-go v1.24.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= +github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= @@ -74,6 +75,8 @@ github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= 
github.com/hashicorp/hcl/v2 v2.18.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= +github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= +github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -82,6 +85,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= @@ -90,14 +95,21 @@ github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOF github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/pmezard/go-difflib v1.0.0 
h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= +github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= diff --git a/backup/internal/executor/runner.go b/backup/internal/executor/runner.go index e55c641ad..18a977280 100644 --- a/backup/internal/executor/runner.go +++ b/backup/internal/executor/runner.go @@ -71,6 +71,8 @@ type bundleManifest struct { JobID string `json:"job_id"` BackupType string `json:"backup_type"` SourceMode string `json:"source_mode"` + PostgresID string `json:"postgres_profile_id,omitempty"` + RedisID string `json:"redis_profile_id,omitempty"` CreatedAt string `json:"created_at"` Files []generatedFile `json:"files"` } @@ -239,6 +241,39 @@ func (r *Runner) 
run(ctx context.Context, job *ent.BackupJob) (*runResult, error if err != nil { return nil, fmt.Errorf("load config failed: %w", err) } + backupType := strings.TrimSpace(job.BackupType.String()) + + effectiveConfig := *cfg + if backupType == backupjob.BackupTypePostgres.String() || backupType == backupjob.BackupTypeFull.String() { + postgresProfileID := strings.TrimSpace(job.PostgresProfileID) + if postgresProfileID != "" && postgresProfileID != strings.TrimSpace(cfg.ActivePostgresID) { + postgresProfile, profileErr := r.store.GetSourceProfile(ctx, "postgres", postgresProfileID) + if profileErr != nil { + return nil, fmt.Errorf("load postgres source profile failed: %w", profileErr) + } + effectiveConfig.Postgres = postgresProfile.Config + } + } + if backupType == backupjob.BackupTypeRedis.String() || backupType == backupjob.BackupTypeFull.String() { + redisProfileID := strings.TrimSpace(job.RedisProfileID) + if redisProfileID != "" && redisProfileID != strings.TrimSpace(cfg.ActiveRedisID) { + redisProfile, profileErr := r.store.GetSourceProfile(ctx, "redis", redisProfileID) + if profileErr != nil { + return nil, fmt.Errorf("load redis source profile failed: %w", profileErr) + } + effectiveConfig.Redis = redisProfile.Config + } + } + + uploadS3Config := cfg.S3 + profileID := strings.TrimSpace(job.S3ProfileID) + if profileID != "" && profileID != cfg.ActiveS3ProfileID { + profile, profileErr := r.store.GetS3Profile(ctx, profileID) + if profileErr != nil { + return nil, fmt.Errorf("load s3 profile failed: %w", profileErr) + } + uploadS3Config = profile.S3 + } backupRoot := normalizeBackupRoot(cfg.BackupRoot) jobDir := filepath.Join( @@ -253,11 +288,10 @@ func (r *Runner) run(ctx context.Context, job *ent.BackupJob) (*runResult, error } generated := make([]generatedFile, 0, 4) - backupType := strings.TrimSpace(job.BackupType.String()) if backupType == backupjob.BackupTypePostgres.String() || backupType == backupjob.BackupTypeFull.String() { postgresPath := 
filepath.Join(jobDir, "postgres.dump") - if err := runPostgresBackup(ctx, cfg, postgresPath); err != nil { + if err := runPostgresBackup(ctx, &effectiveConfig, postgresPath); err != nil { return nil, fmt.Errorf("postgres backup failed: %w", err) } gf, err := buildGeneratedFile("postgres.dump", postgresPath) @@ -270,7 +304,7 @@ func (r *Runner) run(ctx context.Context, job *ent.BackupJob) (*runResult, error if backupType == backupjob.BackupTypeRedis.String() || backupType == backupjob.BackupTypeFull.String() { redisPath := filepath.Join(jobDir, "redis.rdb") - if err := runRedisBackup(ctx, cfg, redisPath, job.JobID); err != nil { + if err := runRedisBackup(ctx, &effectiveConfig, redisPath, job.JobID); err != nil { return nil, fmt.Errorf("redis backup failed: %w", err) } gf, err := buildGeneratedFile("redis.rdb", redisPath) @@ -284,7 +318,9 @@ func (r *Runner) run(ctx context.Context, job *ent.BackupJob) (*runResult, error manifest := bundleManifest{ JobID: job.JobID, BackupType: backupType, - SourceMode: strings.TrimSpace(cfg.SourceMode), + SourceMode: strings.TrimSpace(effectiveConfig.SourceMode), + PostgresID: strings.TrimSpace(job.PostgresProfileID), + RedisID: strings.TrimSpace(job.RedisProfileID), CreatedAt: time.Now().UTC().Format(time.RFC3339), Files: generated, } @@ -318,7 +354,7 @@ func (r *Runner) run(ctx context.Context, job *ent.BackupJob) (*runResult, error if job.UploadToS3 { r.logEvent(job.JobID, "info", "s3", "start upload to s3", "") - s3Object, uploadErr := uploadToS3(ctx, cfg, job.JobID, bundlePath) + s3Object, uploadErr := uploadToS3(ctx, uploadS3Config, job.JobID, bundlePath) if uploadErr != nil { result.PartialErr = fmt.Errorf("upload s3 failed: %w", uploadErr) r.logEvent(job.JobID, "warning", "s3", "upload to s3 failed", shortenError(uploadErr)) @@ -328,7 +364,7 @@ func (r *Runner) run(ctx context.Context, job *ent.BackupJob) (*runResult, error } } - if err := applyRetentionPolicy(ctx, r.store, cfg); err != nil { + if err := 
applyRetentionPolicy(ctx, r.store, &effectiveConfig); err != nil { r.logger.Printf("retention cleanup failed: %v", err) } @@ -461,29 +497,26 @@ func runRedisBackup(ctx context.Context, cfg *entstore.ConfigSnapshot, destinati } } -func uploadToS3(ctx context.Context, cfg *entstore.ConfigSnapshot, jobID, bundlePath string) (*entstore.BackupS3ObjectSnapshot, error) { - if cfg == nil { - return nil, errors.New("config is nil") - } - if !cfg.S3.Enabled { +func uploadToS3(ctx context.Context, s3Cfg entstore.S3Config, jobID, bundlePath string) (*entstore.BackupS3ObjectSnapshot, error) { + if !s3Cfg.Enabled { return nil, errors.New("s3 is disabled") } - if strings.TrimSpace(cfg.S3.Bucket) == "" { + if strings.TrimSpace(s3Cfg.Bucket) == "" { return nil, errors.New("s3.bucket is required") } - if strings.TrimSpace(cfg.S3.Region) == "" { + if strings.TrimSpace(s3Cfg.Region) == "" { return nil, errors.New("s3.region is required") } client, err := s3client.New(ctx, s3client.Config{ - Endpoint: strings.TrimSpace(cfg.S3.Endpoint), - Region: strings.TrimSpace(cfg.S3.Region), - AccessKeyID: strings.TrimSpace(cfg.S3.AccessKeyID), - SecretAccessKey: strings.TrimSpace(cfg.S3.SecretAccessKey), - Bucket: strings.TrimSpace(cfg.S3.Bucket), - Prefix: strings.Trim(strings.TrimSpace(cfg.S3.Prefix), "/"), - ForcePathStyle: cfg.S3.ForcePathStyle, - UseSSL: cfg.S3.UseSSL, + Endpoint: strings.TrimSpace(s3Cfg.Endpoint), + Region: strings.TrimSpace(s3Cfg.Region), + AccessKeyID: strings.TrimSpace(s3Cfg.AccessKeyID), + SecretAccessKey: strings.TrimSpace(s3Cfg.SecretAccessKey), + Bucket: strings.TrimSpace(s3Cfg.Bucket), + Prefix: strings.Trim(strings.TrimSpace(s3Cfg.Prefix), "/"), + ForcePathStyle: s3Cfg.ForcePathStyle, + UseSSL: s3Cfg.UseSSL, }) if err != nil { return nil, err diff --git a/backup/internal/grpcserver/server.go b/backup/internal/grpcserver/server.go index 107906b28..6e453b381 100644 --- a/backup/internal/grpcserver/server.go +++ b/backup/internal/grpcserver/server.go @@ -73,6 +73,118 
@@ func (s *Server) UpdateConfig(ctx context.Context, req *backupv1.UpdateConfigReq return &backupv1.UpdateConfigResponse{Config: toProtoConfig(updated)}, nil } +func (s *Server) ListSourceProfiles(ctx context.Context, req *backupv1.ListSourceProfilesRequest) (*backupv1.ListSourceProfilesResponse, error) { + sourceType := "" + if req != nil { + sourceType = strings.TrimSpace(req.GetSourceType()) + } + if sourceType == "" { + return nil, status.Error(codes.InvalidArgument, "source_type is required") + } + + items, err := s.store.ListSourceProfiles(ctx, sourceType) + if err != nil { + if mapped := mapSourceProfileStoreError(err); mapped != nil { + return nil, mapped + } + return nil, status.Errorf(codes.Internal, "list source profiles failed: %v", err) + } + + out := make([]*backupv1.SourceProfile, 0, len(items)) + for _, item := range items { + out = append(out, toProtoSourceProfile(item)) + } + return &backupv1.ListSourceProfilesResponse{Items: out}, nil +} + +func (s *Server) CreateSourceProfile(ctx context.Context, req *backupv1.CreateSourceProfileRequest) (*backupv1.CreateSourceProfileResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "request is required") + } + if err := validateSourceProfileRequest(req.GetSourceType(), req.GetProfileId(), req.GetName(), req.GetConfig()); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + profile, err := s.store.CreateSourceProfile(ctx, entstore.CreateSourceProfileInput{ + SourceType: strings.TrimSpace(req.GetSourceType()), + ProfileID: strings.TrimSpace(req.GetProfileId()), + Name: strings.TrimSpace(req.GetName()), + Config: fromProtoSourceConfig(req.GetConfig()), + SetActive: req.GetSetActive(), + }) + if err != nil { + if mapped := mapSourceProfileStoreError(err); mapped != nil { + return nil, mapped + } + return nil, status.Errorf(codes.Internal, "create source profile failed: %v", err) + } + return &backupv1.CreateSourceProfileResponse{Profile: 
toProtoSourceProfile(profile)}, nil +} + +func (s *Server) UpdateSourceProfile(ctx context.Context, req *backupv1.UpdateSourceProfileRequest) (*backupv1.UpdateSourceProfileResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "request is required") + } + if err := validateSourceProfileRequest(req.GetSourceType(), req.GetProfileId(), req.GetName(), req.GetConfig()); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + profile, err := s.store.UpdateSourceProfile(ctx, entstore.UpdateSourceProfileInput{ + SourceType: strings.TrimSpace(req.GetSourceType()), + ProfileID: strings.TrimSpace(req.GetProfileId()), + Name: strings.TrimSpace(req.GetName()), + Config: fromProtoSourceConfig(req.GetConfig()), + }) + if err != nil { + if mapped := mapSourceProfileStoreError(err); mapped != nil { + return nil, mapped + } + return nil, status.Errorf(codes.Internal, "update source profile failed: %v", err) + } + return &backupv1.UpdateSourceProfileResponse{Profile: toProtoSourceProfile(profile)}, nil +} + +func (s *Server) DeleteSourceProfile(ctx context.Context, req *backupv1.DeleteSourceProfileRequest) (*backupv1.DeleteSourceProfileResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "request is required") + } + if strings.TrimSpace(req.GetSourceType()) == "" { + return nil, status.Error(codes.InvalidArgument, "source_type is required") + } + if strings.TrimSpace(req.GetProfileId()) == "" { + return nil, status.Error(codes.InvalidArgument, "profile_id is required") + } + + if err := s.store.DeleteSourceProfile(ctx, req.GetSourceType(), req.GetProfileId()); err != nil { + if mapped := mapSourceProfileStoreError(err); mapped != nil { + return nil, mapped + } + return nil, status.Errorf(codes.Internal, "delete source profile failed: %v", err) + } + return &backupv1.DeleteSourceProfileResponse{}, nil +} + +func (s *Server) SetActiveSourceProfile(ctx context.Context, req 
*backupv1.SetActiveSourceProfileRequest) (*backupv1.SetActiveSourceProfileResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "request is required") + } + if strings.TrimSpace(req.GetSourceType()) == "" { + return nil, status.Error(codes.InvalidArgument, "source_type is required") + } + if strings.TrimSpace(req.GetProfileId()) == "" { + return nil, status.Error(codes.InvalidArgument, "profile_id is required") + } + + profile, err := s.store.SetActiveSourceProfile(ctx, req.GetSourceType(), req.GetProfileId()) + if err != nil { + if mapped := mapSourceProfileStoreError(err); mapped != nil { + return nil, mapped + } + return nil, status.Errorf(codes.Internal, "set active source profile failed: %v", err) + } + return &backupv1.SetActiveSourceProfileResponse{Profile: toProtoSourceProfile(profile)}, nil +} + func (s *Server) ValidateS3(ctx context.Context, req *backupv1.ValidateS3Request) (*backupv1.ValidateS3Response, error) { if req == nil || req.GetS3() == nil { return nil, status.Error(codes.InvalidArgument, "s3 config is required") @@ -106,6 +218,94 @@ func (s *Server) ValidateS3(ctx context.Context, req *backupv1.ValidateS3Request return &backupv1.ValidateS3Response{Ok: true, Message: "ok"}, nil } +func (s *Server) ListS3Profiles(ctx context.Context, _ *backupv1.ListS3ProfilesRequest) (*backupv1.ListS3ProfilesResponse, error) { + items, err := s.store.ListS3Profiles(ctx) + if err != nil { + return nil, status.Errorf(codes.Internal, "list s3 profiles failed: %v", err) + } + + out := make([]*backupv1.S3Profile, 0, len(items)) + for _, item := range items { + out = append(out, toProtoS3Profile(item)) + } + return &backupv1.ListS3ProfilesResponse{Items: out}, nil +} + +func (s *Server) CreateS3Profile(ctx context.Context, req *backupv1.CreateS3ProfileRequest) (*backupv1.CreateS3ProfileResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "request is required") + } + if err := 
validateS3ProfileRequest(req.GetProfileId(), req.GetName(), req.GetS3()); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + profile, err := s.store.CreateS3Profile(ctx, entstore.CreateS3ProfileInput{ + ProfileID: strings.TrimSpace(req.GetProfileId()), + Name: strings.TrimSpace(req.GetName()), + S3: fromProtoS3Config(req.GetS3()), + SetActive: req.GetSetActive(), + }) + if err != nil { + if mapped := mapS3ProfileStoreError(err); mapped != nil { + return nil, mapped + } + return nil, status.Errorf(codes.Internal, "create s3 profile failed: %v", err) + } + return &backupv1.CreateS3ProfileResponse{Profile: toProtoS3Profile(profile)}, nil +} + +func (s *Server) UpdateS3Profile(ctx context.Context, req *backupv1.UpdateS3ProfileRequest) (*backupv1.UpdateS3ProfileResponse, error) { + if req == nil { + return nil, status.Error(codes.InvalidArgument, "request is required") + } + if err := validateS3ProfileRequest(req.GetProfileId(), req.GetName(), req.GetS3()); err != nil { + return nil, status.Error(codes.InvalidArgument, err.Error()) + } + + profile, err := s.store.UpdateS3Profile(ctx, entstore.UpdateS3ProfileInput{ + ProfileID: strings.TrimSpace(req.GetProfileId()), + Name: strings.TrimSpace(req.GetName()), + S3: fromProtoS3Config(req.GetS3()), + }) + if err != nil { + if mapped := mapS3ProfileStoreError(err); mapped != nil { + return nil, mapped + } + return nil, status.Errorf(codes.Internal, "update s3 profile failed: %v", err) + } + return &backupv1.UpdateS3ProfileResponse{Profile: toProtoS3Profile(profile)}, nil +} + +func (s *Server) DeleteS3Profile(ctx context.Context, req *backupv1.DeleteS3ProfileRequest) (*backupv1.DeleteS3ProfileResponse, error) { + if req == nil || strings.TrimSpace(req.GetProfileId()) == "" { + return nil, status.Error(codes.InvalidArgument, "profile_id is required") + } + + err := s.store.DeleteS3Profile(ctx, req.GetProfileId()) + if err != nil { + if mapped := mapS3ProfileStoreError(err); mapped != nil { + 
return nil, mapped + } + return nil, status.Errorf(codes.Internal, "delete s3 profile failed: %v", err) + } + return &backupv1.DeleteS3ProfileResponse{}, nil +} + +func (s *Server) SetActiveS3Profile(ctx context.Context, req *backupv1.SetActiveS3ProfileRequest) (*backupv1.SetActiveS3ProfileResponse, error) { + if req == nil || strings.TrimSpace(req.GetProfileId()) == "" { + return nil, status.Error(codes.InvalidArgument, "profile_id is required") + } + + profile, err := s.store.SetActiveS3Profile(ctx, req.GetProfileId()) + if err != nil { + if mapped := mapS3ProfileStoreError(err); mapped != nil { + return nil, mapped + } + return nil, status.Errorf(codes.Internal, "set active s3 profile failed: %v", err) + } + return &backupv1.SetActiveS3ProfileResponse{Profile: toProtoS3Profile(profile)}, nil +} + func (s *Server) CreateBackupJob(ctx context.Context, req *backupv1.CreateBackupJobRequest) (*backupv1.CreateBackupJobResponse, error) { if req == nil { return nil, status.Error(codes.InvalidArgument, "request is required") @@ -120,8 +320,17 @@ func (s *Server) CreateBackupJob(ctx context.Context, req *backupv1.CreateBackup UploadToS3: req.GetUploadToS3(), TriggeredBy: strings.TrimSpace(req.GetTriggeredBy()), IdempotencyKey: strings.TrimSpace(req.GetIdempotencyKey()), + S3ProfileID: strings.TrimSpace(req.GetS3ProfileId()), + PostgresID: strings.TrimSpace(req.GetPostgresProfileId()), + RedisID: strings.TrimSpace(req.GetRedisProfileId()), }) if err != nil { + if ent.IsNotFound(err) { + return nil, status.Error(codes.InvalidArgument, "source profile or s3 profile not found") + } + if mapped := mapSourceProfileStoreError(err); mapped != nil { + return nil, mapped + } return nil, status.Errorf(codes.Internal, "create backup job failed: %v", err) } if created && s.notifier != nil { @@ -200,6 +409,80 @@ func validateConfig(cfg *backupv1.BackupConfig) error { return nil } +func validateS3ProfileRequest(profileID, name string, s3Cfg *backupv1.S3Config) error { + if 
strings.TrimSpace(profileID) == "" { + return errors.New("profile_id is required") + } + if strings.TrimSpace(name) == "" { + return errors.New("name is required") + } + if s3Cfg == nil { + return errors.New("s3 config is required") + } + if s3Cfg.GetEnabled() { + if strings.TrimSpace(s3Cfg.GetBucket()) == "" { + return errors.New("s3.bucket is required") + } + if strings.TrimSpace(s3Cfg.GetRegion()) == "" { + return errors.New("s3.region is required") + } + } + return nil +} + +func validateSourceProfileRequest(sourceType, profileID, name string, cfg *backupv1.SourceConfig) error { + if strings.TrimSpace(sourceType) == "" { + return errors.New("source_type is required") + } + if strings.TrimSpace(sourceType) != "postgres" && strings.TrimSpace(sourceType) != "redis" { + return errors.New("source_type must be postgres or redis") + } + if strings.TrimSpace(profileID) == "" { + return errors.New("profile_id is required") + } + if strings.TrimSpace(name) == "" { + return errors.New("name is required") + } + if cfg == nil { + return errors.New("source config is required") + } + return nil +} + +func mapS3ProfileStoreError(err error) error { + switch { + case err == nil: + return nil + case ent.IsNotFound(err): + return status.Error(codes.NotFound, "s3 profile not found") + case ent.IsConstraintError(err): + return status.Error(codes.AlreadyExists, "s3 profile already exists") + case errors.Is(err, entstore.ErrS3ProfileRequired): + return status.Error(codes.InvalidArgument, err.Error()) + case errors.Is(err, entstore.ErrActiveS3Profile), errors.Is(err, entstore.ErrS3ProfileInUse): + return status.Error(codes.FailedPrecondition, err.Error()) + default: + return nil + } +} + +func mapSourceProfileStoreError(err error) error { + switch { + case err == nil: + return nil + case ent.IsNotFound(err): + return status.Error(codes.NotFound, "source profile not found") + case ent.IsConstraintError(err): + return status.Error(codes.AlreadyExists, "source profile already exists") + 
case errors.Is(err, entstore.ErrSourceTypeInvalid), errors.Is(err, entstore.ErrSourceIDRequired): + return status.Error(codes.InvalidArgument, err.Error()) + case errors.Is(err, entstore.ErrSourceActive), errors.Is(err, entstore.ErrSourceInUse): + return status.Error(codes.FailedPrecondition, err.Error()) + default: + return nil + } +} + func isValidBackupType(v string) bool { switch v { case backupjob.BackupTypePostgres.String(), backupjob.BackupTypeRedis.String(), backupjob.BackupTypeFull.String(): @@ -223,42 +506,54 @@ func isValidBackupStatus(v string) bool { } func fromProtoConfig(cfg *backupv1.BackupConfig) entstore.ConfigSnapshot { - postgres := cfg.GetPostgres() - redis := cfg.GetRedis() s3Cfg := cfg.GetS3() return entstore.ConfigSnapshot{ - SourceMode: strings.TrimSpace(cfg.GetSourceMode()), - BackupRoot: strings.TrimSpace(cfg.GetBackupRoot()), - SQLitePath: strings.TrimSpace(cfg.GetSqlitePath()), - RetentionDays: cfg.GetRetentionDays(), - KeepLast: cfg.GetKeepLast(), - Postgres: entstore.SourceConfig{ - Host: strings.TrimSpace(postgres.GetHost()), - Port: postgres.GetPort(), - User: strings.TrimSpace(postgres.GetUser()), - Password: strings.TrimSpace(postgres.GetPassword()), - Database: strings.TrimSpace(postgres.GetDatabase()), - SSLMode: strings.TrimSpace(postgres.GetSslMode()), - ContainerName: strings.TrimSpace(postgres.GetContainerName()), - }, - Redis: entstore.SourceConfig{ - Addr: strings.TrimSpace(redis.GetAddr()), - Username: strings.TrimSpace(redis.GetUsername()), - Password: strings.TrimSpace(redis.GetPassword()), - DB: redis.GetDb(), - ContainerName: strings.TrimSpace(redis.GetContainerName()), - }, - S3: entstore.S3Config{ - Enabled: s3Cfg.GetEnabled(), - Endpoint: strings.TrimSpace(s3Cfg.GetEndpoint()), - Region: strings.TrimSpace(s3Cfg.GetRegion()), - Bucket: strings.TrimSpace(s3Cfg.GetBucket()), - AccessKeyID: strings.TrimSpace(s3Cfg.GetAccessKeyId()), - SecretAccessKey: strings.TrimSpace(s3Cfg.GetSecretAccessKey()), - Prefix: 
strings.Trim(strings.TrimSpace(s3Cfg.GetPrefix()), "/"), - ForcePathStyle: s3Cfg.GetForcePathStyle(), - UseSSL: s3Cfg.GetUseSsl(), - }, + SourceMode: strings.TrimSpace(cfg.GetSourceMode()), + BackupRoot: strings.TrimSpace(cfg.GetBackupRoot()), + SQLitePath: strings.TrimSpace(cfg.GetSqlitePath()), + RetentionDays: cfg.GetRetentionDays(), + KeepLast: cfg.GetKeepLast(), + Postgres: fromProtoSourceConfig(cfg.GetPostgres()), + Redis: fromProtoSourceConfig(cfg.GetRedis()), + S3: fromProtoS3Config(s3Cfg), + ActivePostgresID: strings.TrimSpace(cfg.GetActivePostgresProfileId()), + ActiveRedisID: strings.TrimSpace(cfg.GetActiveRedisProfileId()), + ActiveS3ProfileID: strings.TrimSpace(cfg.GetActiveS3ProfileId()), + } +} + +func fromProtoSourceConfig(sourceCfg *backupv1.SourceConfig) entstore.SourceConfig { + if sourceCfg == nil { + return entstore.SourceConfig{} + } + return entstore.SourceConfig{ + Host: strings.TrimSpace(sourceCfg.GetHost()), + Port: sourceCfg.GetPort(), + User: strings.TrimSpace(sourceCfg.GetUser()), + Username: strings.TrimSpace(sourceCfg.GetUsername()), + Password: strings.TrimSpace(sourceCfg.GetPassword()), + Database: strings.TrimSpace(sourceCfg.GetDatabase()), + SSLMode: strings.TrimSpace(sourceCfg.GetSslMode()), + Addr: strings.TrimSpace(sourceCfg.GetAddr()), + DB: sourceCfg.GetDb(), + ContainerName: strings.TrimSpace(sourceCfg.GetContainerName()), + } +} + +func fromProtoS3Config(s3Cfg *backupv1.S3Config) entstore.S3Config { + if s3Cfg == nil { + return entstore.S3Config{} + } + return entstore.S3Config{ + Enabled: s3Cfg.GetEnabled(), + Endpoint: strings.TrimSpace(s3Cfg.GetEndpoint()), + Region: strings.TrimSpace(s3Cfg.GetRegion()), + Bucket: strings.TrimSpace(s3Cfg.GetBucket()), + AccessKeyID: strings.TrimSpace(s3Cfg.GetAccessKeyId()), + SecretAccessKey: strings.TrimSpace(s3Cfg.GetSecretAccessKey()), + Prefix: strings.Trim(strings.TrimSpace(s3Cfg.GetPrefix()), "/"), + ForcePathStyle: s3Cfg.GetForcePathStyle(), + UseSSL: s3Cfg.GetUseSsl(), } } @@ 
-272,22 +567,8 @@ func toProtoConfig(cfg *entstore.ConfigSnapshot) *backupv1.BackupConfig { SqlitePath: cfg.SQLitePath, RetentionDays: cfg.RetentionDays, KeepLast: cfg.KeepLast, - Postgres: &backupv1.SourceConfig{ - Host: cfg.Postgres.Host, - Port: cfg.Postgres.Port, - User: cfg.Postgres.User, - Password: cfg.Postgres.Password, - Database: cfg.Postgres.Database, - SslMode: cfg.Postgres.SSLMode, - ContainerName: cfg.Postgres.ContainerName, - }, - Redis: &backupv1.SourceConfig{ - Addr: cfg.Redis.Addr, - Username: cfg.Redis.Username, - Password: cfg.Redis.Password, - Db: cfg.Redis.DB, - ContainerName: cfg.Redis.ContainerName, - }, + Postgres: toProtoSourceConfig(cfg.Postgres), + Redis: toProtoSourceConfig(cfg.Redis), S3: &backupv1.S3Config{ Enabled: cfg.S3.Enabled, Endpoint: cfg.S3.Endpoint, @@ -299,7 +580,78 @@ func toProtoConfig(cfg *entstore.ConfigSnapshot) *backupv1.BackupConfig { ForcePathStyle: cfg.S3.ForcePathStyle, UseSsl: cfg.S3.UseSSL, }, + ActivePostgresProfileId: cfg.ActivePostgresID, + ActiveRedisProfileId: cfg.ActiveRedisID, + ActiveS3ProfileId: cfg.ActiveS3ProfileID, + } +} + +func toProtoSourceConfig(cfg entstore.SourceConfig) *backupv1.SourceConfig { + return &backupv1.SourceConfig{ + Host: cfg.Host, + Port: cfg.Port, + User: cfg.User, + Password: cfg.Password, + Database: cfg.Database, + SslMode: cfg.SSLMode, + Addr: cfg.Addr, + Username: cfg.Username, + Db: cfg.DB, + ContainerName: cfg.ContainerName, + } +} + +func toProtoS3Profile(profile *entstore.S3ProfileSnapshot) *backupv1.S3Profile { + if profile == nil { + return &backupv1.S3Profile{} + } + out := &backupv1.S3Profile{ + ProfileId: profile.ProfileID, + Name: profile.Name, + IsActive: profile.IsActive, + SecretAccessKeyConfigured: profile.SecretAccessKeyConfigured, + S3: &backupv1.S3Config{ + Enabled: profile.S3.Enabled, + Endpoint: profile.S3.Endpoint, + Region: profile.S3.Region, + Bucket: profile.S3.Bucket, + AccessKeyId: profile.S3.AccessKeyID, + Prefix: profile.S3.Prefix, + ForcePathStyle: 
profile.S3.ForcePathStyle, + UseSsl: profile.S3.UseSSL, + }, + } + if !profile.CreatedAt.IsZero() { + out.CreatedAt = profile.CreatedAt.UTC().Format(time.RFC3339) } + if !profile.UpdatedAt.IsZero() { + out.UpdatedAt = profile.UpdatedAt.UTC().Format(time.RFC3339) + } + return out +} + +func toProtoSourceProfile(profile *entstore.SourceProfileSnapshot) *backupv1.SourceProfile { + if profile == nil { + return &backupv1.SourceProfile{} + } + out := &backupv1.SourceProfile{ + SourceType: profile.SourceType, + ProfileId: profile.ProfileID, + Name: profile.Name, + IsActive: profile.IsActive, + Config: toProtoSourceConfig(profile.Config), + PasswordConfigured: profile.PasswordConfigured, + } + if out.GetConfig() != nil { + out.Config.Password = "" + } + if !profile.CreatedAt.IsZero() { + out.CreatedAt = profile.CreatedAt.UTC().Format(time.RFC3339) + } + if !profile.UpdatedAt.IsZero() { + out.UpdatedAt = profile.UpdatedAt.UTC().Format(time.RFC3339) + } + return out } func toProtoJob(job *ent.BackupJob) *backupv1.BackupJob { @@ -307,13 +659,16 @@ func toProtoJob(job *ent.BackupJob) *backupv1.BackupJob { return &backupv1.BackupJob{} } out := &backupv1.BackupJob{ - JobId: job.JobID, - BackupType: job.BackupType.String(), - Status: job.Status.String(), - TriggeredBy: job.TriggeredBy, - IdempotencyKey: job.IdempotencyKey, - UploadToS3: job.UploadToS3, - ErrorMessage: job.ErrorMessage, + JobId: job.JobID, + BackupType: job.BackupType.String(), + Status: job.Status.String(), + TriggeredBy: job.TriggeredBy, + IdempotencyKey: job.IdempotencyKey, + UploadToS3: job.UploadToS3, + S3ProfileId: job.S3ProfileID, + PostgresProfileId: job.PostgresProfileID, + RedisProfileId: job.RedisProfileID, + ErrorMessage: job.ErrorMessage, Artifact: &backupv1.BackupArtifact{ LocalPath: job.ArtifactLocalPath, SizeBytes: nillableInt64(job.ArtifactSizeBytes), diff --git a/backup/internal/store/entstore/store.go b/backup/internal/store/entstore/store.go index 219a0e239..b276e9279 100644 --- 
a/backup/internal/store/entstore/store.go +++ b/backup/internal/store/entstore/store.go @@ -23,8 +23,20 @@ import ( ) const ( - defaultSQLitePath = "/tmp/sub2api-backupd.db" - idempotencyWindow = 10 * time.Minute + defaultSQLitePath = "/tmp/sub2api-backupd.db" + idempotencyWindow = 10 * time.Minute + defaultS3ProfileID = "default" + defaultSourceID = "default" +) + +var ( + ErrS3ProfileInUse = errors.New("s3 profile has queued/running jobs") + ErrActiveS3Profile = errors.New("active s3 profile cannot be deleted") + ErrS3ProfileRequired = errors.New("s3 profile_id is required") + ErrSourceTypeInvalid = errors.New("source_type must be postgres or redis") + ErrSourceIDRequired = errors.New("source profile_id is required") + ErrSourceActive = errors.New("active source profile cannot be deleted") + ErrSourceInUse = errors.New("source profile has queued/running jobs") ) type SourceConfig struct { @@ -53,14 +65,66 @@ type S3Config struct { } type ConfigSnapshot struct { - SourceMode string - BackupRoot string - SQLitePath string - RetentionDays int32 - KeepLast int32 - Postgres SourceConfig - Redis SourceConfig - S3 S3Config + SourceMode string + BackupRoot string + SQLitePath string + RetentionDays int32 + KeepLast int32 + Postgres SourceConfig + Redis SourceConfig + S3 S3Config + ActivePostgresID string + ActiveRedisID string + ActiveS3ProfileID string +} + +type SourceProfileSnapshot struct { + SourceType string + ProfileID string + Name string + IsActive bool + Config SourceConfig + PasswordConfigured bool + CreatedAt time.Time + UpdatedAt time.Time +} + +type CreateSourceProfileInput struct { + SourceType string + ProfileID string + Name string + Config SourceConfig + SetActive bool +} + +type UpdateSourceProfileInput struct { + SourceType string + ProfileID string + Name string + Config SourceConfig +} + +type S3ProfileSnapshot struct { + ProfileID string + Name string + IsActive bool + S3 S3Config + SecretAccessKeyConfigured bool + CreatedAt time.Time + UpdatedAt 
time.Time +} + +type CreateS3ProfileInput struct { + ProfileID string + Name string + S3 S3Config + SetActive bool +} + +type UpdateS3ProfileInput struct { + ProfileID string + Name string + S3 S3Config } type CreateBackupJobInput struct { @@ -68,6 +132,9 @@ type CreateBackupJobInput struct { UploadToS3 bool TriggeredBy string IdempotencyKey string + S3ProfileID string + PostgresID string + RedisID string } type ListBackupJobsInput struct { @@ -156,15 +223,15 @@ func (s *Store) GetConfig(ctx context.Context) (*ConfigSnapshot, error) { if err != nil { return nil, err } - postgresCfg, err := s.getSourceConfig(ctx, backupsourceconfig.SourceTypePostgres) + postgresCfg, err := s.getActiveSourceConfigEntity(ctx, backupsourceconfig.SourceTypePostgres.String()) if err != nil { return nil, err } - redisCfg, err := s.getSourceConfig(ctx, backupsourceconfig.SourceTypeRedis) + redisCfg, err := s.getActiveSourceConfigEntity(ctx, backupsourceconfig.SourceTypeRedis.String()) if err != nil { return nil, err } - s3Cfg, err := s.client.BackupS3Config.Query().Order(ent.Asc(backups3config.FieldID)).First(ctx) + s3Cfg, err := s.getActiveS3ConfigEntity(ctx) if err != nil { return nil, err } @@ -202,15 +269,582 @@ func (s *Store) GetConfig(ctx context.Context) (*ConfigSnapshot, error) { ForcePathStyle: s3Cfg.ForcePathStyle, UseSSL: s3Cfg.UseSsl, }, + ActivePostgresID: postgresCfg.ProfileID, + ActiveRedisID: redisCfg.ProfileID, + ActiveS3ProfileID: s3Cfg.ProfileID, + } + return cfg, nil +} + +func (s *Store) UpdateConfig(ctx context.Context, cfg ConfigSnapshot) (*ConfigSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + tx, err := s.client.Tx(ctx) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } + }() + + setting, err := tx.BackupSetting.Query().Order(ent.Asc(backupsetting.FieldID)).First(ctx) + if err != nil { + return nil, err + } + updatedSetting := tx.BackupSetting.UpdateOneID(setting.ID). 
+ SetSourceMode(backupsetting.SourceMode(cfg.SourceMode)). + SetBackupRoot(strings.TrimSpace(cfg.BackupRoot)). + SetRetentionDays(int(cfg.RetentionDays)). + SetKeepLast(int(cfg.KeepLast)). + SetSqlitePath(strings.TrimSpace(cfg.SQLitePath)) + if _, err = updatedSetting.Save(ctx); err != nil { + return nil, err + } + + if err = s.updateSourceConfigTx( + ctx, + tx, + backupsourceconfig.SourceTypePostgres.String(), + strings.TrimSpace(cfg.ActivePostgresID), + cfg.Postgres, + ); err != nil { + return nil, err + } + if err = s.updateSourceConfigTx( + ctx, + tx, + backupsourceconfig.SourceTypeRedis.String(), + strings.TrimSpace(cfg.ActiveRedisID), + cfg.Redis, + ); err != nil { + return nil, err + } + + s3Entity, err := tx.BackupS3Config.Query(). + Where(backups3config.IsActiveEQ(true)). + Order(ent.Asc(backups3config.FieldID)). + First(ctx) + if err != nil { + return nil, err + } + s3Updater := tx.BackupS3Config.UpdateOneID(s3Entity.ID). + SetEnabled(cfg.S3.Enabled). + SetEndpoint(strings.TrimSpace(cfg.S3.Endpoint)). + SetRegion(strings.TrimSpace(cfg.S3.Region)). + SetBucket(strings.TrimSpace(cfg.S3.Bucket)). + SetAccessKeyID(strings.TrimSpace(cfg.S3.AccessKeyID)). + SetPrefix(strings.Trim(strings.TrimSpace(cfg.S3.Prefix), "/")). + SetForcePathStyle(cfg.S3.ForcePathStyle). + SetUseSsl(cfg.S3.UseSSL) + if strings.TrimSpace(cfg.S3.SecretAccessKey) != "" { + s3Updater.SetSecretAccessKeyEncrypted(strings.TrimSpace(cfg.S3.SecretAccessKey)) + } + if _, err = s3Updater.Save(ctx); err != nil { + return nil, err + } + + if err = tx.Commit(); err != nil { + return nil, err + } + return s.GetConfig(ctx) +} + +func (s *Store) ListS3Profiles(ctx context.Context) ([]*S3ProfileSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + items, err := s.client.BackupS3Config.Query(). + Order(ent.Desc(backups3config.FieldIsActive), ent.Asc(backups3config.FieldID)). 
+ All(ctx) + if err != nil { + return nil, err + } + + out := make([]*S3ProfileSnapshot, 0, len(items)) + for _, item := range items { + out = append(out, toS3ProfileSnapshot(item)) + } + return out, nil +} + +func (s *Store) GetS3Profile(ctx context.Context, profileID string) (*S3ProfileSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + profileID = strings.TrimSpace(profileID) + if profileID == "" { + return nil, ErrS3ProfileRequired + } + entity, err := s.client.BackupS3Config.Query(). + Where(backups3config.ProfileIDEQ(profileID)). + First(ctx) + if err != nil { + return nil, err + } + return toS3ProfileSnapshot(entity), nil +} + +func (s *Store) CreateS3Profile(ctx context.Context, input CreateS3ProfileInput) (*S3ProfileSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + profileID := strings.TrimSpace(input.ProfileID) + if profileID == "" { + return nil, ErrS3ProfileRequired + } + name := strings.TrimSpace(input.Name) + if name == "" { + return nil, errors.New("s3 profile name is required") + } + + tx, err := s.client.Tx(ctx) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } + }() + + activeCount, err := tx.BackupS3Config.Query().Where(backups3config.IsActiveEQ(true)).Count(ctx) + if err != nil { + return nil, err + } + setActive := input.SetActive || activeCount == 0 + if setActive { + if _, err = tx.BackupS3Config.Update(). + Where(backups3config.IsActiveEQ(true)). + SetIsActive(false). + Save(ctx); err != nil { + return nil, err + } + } + + builder := tx.BackupS3Config.Create(). + SetProfileID(profileID). + SetName(name). + SetIsActive(setActive). + SetEnabled(input.S3.Enabled). + SetEndpoint(strings.TrimSpace(input.S3.Endpoint)). + SetRegion(strings.TrimSpace(input.S3.Region)). + SetBucket(strings.TrimSpace(input.S3.Bucket)). + SetAccessKeyID(strings.TrimSpace(input.S3.AccessKeyID)). 
+ SetPrefix(strings.Trim(strings.TrimSpace(input.S3.Prefix), "/")). + SetForcePathStyle(input.S3.ForcePathStyle). + SetUseSsl(input.S3.UseSSL) + if secret := strings.TrimSpace(input.S3.SecretAccessKey); secret != "" { + builder.SetSecretAccessKeyEncrypted(secret) + } + + if _, err = builder.Save(ctx); err != nil { + return nil, err + } + if err = tx.Commit(); err != nil { + return nil, err + } + return s.GetS3Profile(ctx, profileID) +} + +func (s *Store) UpdateS3Profile(ctx context.Context, input UpdateS3ProfileInput) (*S3ProfileSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + profileID := strings.TrimSpace(input.ProfileID) + if profileID == "" { + return nil, ErrS3ProfileRequired + } + name := strings.TrimSpace(input.Name) + if name == "" { + return nil, errors.New("s3 profile name is required") + } + + tx, err := s.client.Tx(ctx) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } + }() + + entity, err := tx.BackupS3Config.Query(). + Where(backups3config.ProfileIDEQ(profileID)). + First(ctx) + if err != nil { + return nil, err + } + + updater := tx.BackupS3Config.UpdateOneID(entity.ID). + SetName(name). + SetEnabled(input.S3.Enabled). + SetEndpoint(strings.TrimSpace(input.S3.Endpoint)). + SetRegion(strings.TrimSpace(input.S3.Region)). + SetBucket(strings.TrimSpace(input.S3.Bucket)). + SetAccessKeyID(strings.TrimSpace(input.S3.AccessKeyID)). + SetPrefix(strings.Trim(strings.TrimSpace(input.S3.Prefix), "/")). + SetForcePathStyle(input.S3.ForcePathStyle). 
+ SetUseSsl(input.S3.UseSSL) + if secret := strings.TrimSpace(input.S3.SecretAccessKey); secret != "" { + updater.SetSecretAccessKeyEncrypted(secret) + } + if _, err = updater.Save(ctx); err != nil { + return nil, err + } + if err = tx.Commit(); err != nil { + return nil, err + } + return s.GetS3Profile(ctx, profileID) +} + +func (s *Store) DeleteS3Profile(ctx context.Context, profileID string) error { + if err := s.ensureDefaults(ctx); err != nil { + return err + } + + profileID = strings.TrimSpace(profileID) + if profileID == "" { + return ErrS3ProfileRequired + } + + tx, err := s.client.Tx(ctx) + if err != nil { + return err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } + }() + + entity, err := tx.BackupS3Config.Query(). + Where(backups3config.ProfileIDEQ(profileID)). + First(ctx) + if err != nil { + return err + } + if entity.IsActive { + _ = tx.Rollback() + return ErrActiveS3Profile + } + + pendingCount, err := tx.BackupJob.Query(). + Where( + backupjob.S3ProfileIDEQ(profileID), + backupjob.UploadToS3EQ(true), + backupjob.Or( + backupjob.StatusEQ(backupjob.StatusQueued), + backupjob.StatusEQ(backupjob.StatusRunning), + ), + ). + Count(ctx) + if err != nil { + return err + } + if pendingCount > 0 { + _ = tx.Rollback() + return ErrS3ProfileInUse + } + + if err = tx.BackupS3Config.DeleteOneID(entity.ID).Exec(ctx); err != nil { + return err + } + return tx.Commit() +} + +func (s *Store) SetActiveS3Profile(ctx context.Context, profileID string) (*S3ProfileSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + profileID = strings.TrimSpace(profileID) + if profileID == "" { + return nil, ErrS3ProfileRequired + } + + tx, err := s.client.Tx(ctx) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } + }() + + entity, err := tx.BackupS3Config.Query(). + Where(backups3config.ProfileIDEQ(profileID)). 
+ First(ctx) + if err != nil { + return nil, err + } + + if !entity.IsActive { + if _, err = tx.BackupS3Config.Update(). + Where(backups3config.IsActiveEQ(true)). + SetIsActive(false). + Save(ctx); err != nil { + return nil, err + } + if _, err = tx.BackupS3Config.UpdateOneID(entity.ID). + SetIsActive(true). + Save(ctx); err != nil { + return nil, err + } + } + + if err = tx.Commit(); err != nil { + return nil, err + } + return s.GetS3Profile(ctx, profileID) +} + +func (s *Store) ListSourceProfiles(ctx context.Context, sourceType string) ([]*SourceProfileSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + enumType, err := parseSourceType(sourceType) + if err != nil { + return nil, err + } + items, err := s.client.BackupSourceConfig.Query(). + Where(backupsourceconfig.SourceTypeEQ(enumType)). + Order(ent.Desc(backupsourceconfig.FieldIsActive), ent.Asc(backupsourceconfig.FieldID)). + All(ctx) + if err != nil { + return nil, err + } + + out := make([]*SourceProfileSnapshot, 0, len(items)) + for _, item := range items { + out = append(out, toSourceProfileSnapshot(item)) + } + return out, nil +} + +func (s *Store) GetSourceProfile(ctx context.Context, sourceType, profileID string) (*SourceProfileSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + enumType, err := parseSourceType(sourceType) + if err != nil { + return nil, err + } + normalizedProfileID := strings.TrimSpace(profileID) + if normalizedProfileID == "" { + return nil, ErrSourceIDRequired + } + + entity, err := s.client.BackupSourceConfig.Query(). + Where( + backupsourceconfig.SourceTypeEQ(enumType), + backupsourceconfig.ProfileIDEQ(normalizedProfileID), + ). 
+ First(ctx) + if err != nil { + return nil, err + } + return toSourceProfileSnapshot(entity), nil +} + +func (s *Store) CreateSourceProfile(ctx context.Context, input CreateSourceProfileInput) (*SourceProfileSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + enumType, err := parseSourceType(input.SourceType) + if err != nil { + return nil, err + } + profileID := strings.TrimSpace(input.ProfileID) + if profileID == "" { + return nil, ErrSourceIDRequired + } + name := strings.TrimSpace(input.Name) + if name == "" { + return nil, errors.New("source profile name is required") + } + + tx, err := s.client.Tx(ctx) + if err != nil { + return nil, err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } + }() + + activeCount, err := tx.BackupSourceConfig.Query(). + Where( + backupsourceconfig.SourceTypeEQ(enumType), + backupsourceconfig.IsActiveEQ(true), + ). + Count(ctx) + if err != nil { + return nil, err + } + setActive := input.SetActive || activeCount == 0 + if setActive { + if _, err = tx.BackupSourceConfig.Update(). + Where(backupsourceconfig.SourceTypeEQ(enumType), backupsourceconfig.IsActiveEQ(true)). + SetIsActive(false). + Save(ctx); err != nil { + return nil, err + } + } + + create := tx.BackupSourceConfig.Create(). + SetSourceType(enumType). + SetProfileID(profileID). + SetName(name). 
+ SetIsActive(setActive) + applySourceConfigCreate(create, enumType, input.Config) + if _, err = create.Save(ctx); err != nil { + return nil, err + } + if err = tx.Commit(); err != nil { + return nil, err + } + return s.GetSourceProfile(ctx, enumType.String(), profileID) +} + +func (s *Store) UpdateSourceProfile(ctx context.Context, input UpdateSourceProfileInput) (*SourceProfileSnapshot, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, err + } + + enumType, err := parseSourceType(input.SourceType) + if err != nil { + return nil, err + } + profileID := strings.TrimSpace(input.ProfileID) + if profileID == "" { + return nil, ErrSourceIDRequired + } + name := strings.TrimSpace(input.Name) + if name == "" { + return nil, errors.New("source profile name is required") + } + + entity, err := s.client.BackupSourceConfig.Query(). + Where( + backupsourceconfig.SourceTypeEQ(enumType), + backupsourceconfig.ProfileIDEQ(profileID), + ). + First(ctx) + if err != nil { + return nil, err + } + + updater := s.client.BackupSourceConfig.UpdateOneID(entity.ID). + SetName(name) + applySourceConfigUpdate(updater, enumType, input.Config) + if _, err = updater.Save(ctx); err != nil { + return nil, err + } + return s.GetSourceProfile(ctx, enumType.String(), profileID) +} + +func (s *Store) DeleteSourceProfile(ctx context.Context, sourceType, profileID string) error { + if err := s.ensureDefaults(ctx); err != nil { + return err + } + + enumType, err := parseSourceType(sourceType) + if err != nil { + return err + } + normalizedProfileID := strings.TrimSpace(profileID) + if normalizedProfileID == "" { + return ErrSourceIDRequired + } + + tx, err := s.client.Tx(ctx) + if err != nil { + return err + } + defer func() { + if err != nil { + _ = tx.Rollback() + } + }() + + entity, err := tx.BackupSourceConfig.Query(). + Where( + backupsourceconfig.SourceTypeEQ(enumType), + backupsourceconfig.ProfileIDEQ(normalizedProfileID), + ). 
+ First(ctx) + if err != nil { + return err } - return cfg, nil + if entity.IsActive { + _ = tx.Rollback() + return ErrSourceActive + } + + inUseCount := 0 + switch enumType { + case backupsourceconfig.SourceTypePostgres: + inUseCount, err = tx.BackupJob.Query(). + Where( + backupjob.PostgresProfileIDEQ(normalizedProfileID), + backupjob.Or( + backupjob.StatusEQ(backupjob.StatusQueued), + backupjob.StatusEQ(backupjob.StatusRunning), + ), + ). + Count(ctx) + case backupsourceconfig.SourceTypeRedis: + inUseCount, err = tx.BackupJob.Query(). + Where( + backupjob.RedisProfileIDEQ(normalizedProfileID), + backupjob.Or( + backupjob.StatusEQ(backupjob.StatusQueued), + backupjob.StatusEQ(backupjob.StatusRunning), + ), + ). + Count(ctx) + } + if err != nil { + return err + } + if inUseCount > 0 { + _ = tx.Rollback() + return ErrSourceInUse + } + + if err = tx.BackupSourceConfig.DeleteOneID(entity.ID).Exec(ctx); err != nil { + return err + } + return tx.Commit() } -func (s *Store) UpdateConfig(ctx context.Context, cfg ConfigSnapshot) (*ConfigSnapshot, error) { +func (s *Store) SetActiveSourceProfile(ctx context.Context, sourceType, profileID string) (*SourceProfileSnapshot, error) { if err := s.ensureDefaults(ctx); err != nil { return nil, err } + enumType, err := parseSourceType(sourceType) + if err != nil { + return nil, err + } + normalizedProfileID := strings.TrimSpace(profileID) + if normalizedProfileID == "" { + return nil, ErrSourceIDRequired + } + tx, err := s.client.Tx(ctx) if err != nil { return nil, err @@ -221,57 +855,70 @@ func (s *Store) UpdateConfig(ctx context.Context, cfg ConfigSnapshot) (*ConfigSn } }() - setting, err := tx.BackupSetting.Query().Order(ent.Asc(backupsetting.FieldID)).First(ctx) + entity, err := tx.BackupSourceConfig.Query(). + Where( + backupsourceconfig.SourceTypeEQ(enumType), + backupsourceconfig.ProfileIDEQ(normalizedProfileID), + ). + First(ctx) if err != nil { return nil, err } - updatedSetting := tx.BackupSetting.UpdateOneID(setting.ID). 
- SetSourceMode(backupsetting.SourceMode(cfg.SourceMode)). - SetBackupRoot(strings.TrimSpace(cfg.BackupRoot)). - SetRetentionDays(int(cfg.RetentionDays)). - SetKeepLast(int(cfg.KeepLast)). - SetSqlitePath(strings.TrimSpace(cfg.SQLitePath)) - if _, err = updatedSetting.Save(ctx); err != nil { - return nil, err - } - - if err = s.upsertSourceConfigTx(ctx, tx, backupsourceconfig.SourceTypePostgres, cfg.Postgres); err != nil { - return nil, err - } - if err = s.upsertSourceConfigTx(ctx, tx, backupsourceconfig.SourceTypeRedis, cfg.Redis); err != nil { - return nil, err - } - s3Entity, err := tx.BackupS3Config.Query().Order(ent.Asc(backups3config.FieldID)).First(ctx) - if err != nil { - return nil, err - } - s3Updater := tx.BackupS3Config.UpdateOneID(s3Entity.ID). - SetEnabled(cfg.S3.Enabled). - SetEndpoint(strings.TrimSpace(cfg.S3.Endpoint)). - SetRegion(strings.TrimSpace(cfg.S3.Region)). - SetBucket(strings.TrimSpace(cfg.S3.Bucket)). - SetAccessKeyID(strings.TrimSpace(cfg.S3.AccessKeyID)). - SetPrefix(strings.Trim(strings.TrimSpace(cfg.S3.Prefix), "/")). - SetForcePathStyle(cfg.S3.ForcePathStyle). - SetUseSsl(cfg.S3.UseSSL) - if strings.TrimSpace(cfg.S3.SecretAccessKey) != "" { - s3Updater.SetSecretAccessKeyEncrypted(strings.TrimSpace(cfg.S3.SecretAccessKey)) - } - if _, err = s3Updater.Save(ctx); err != nil { - return nil, err + if !entity.IsActive { + if _, err = tx.BackupSourceConfig.Update(). + Where(backupsourceconfig.SourceTypeEQ(enumType), backupsourceconfig.IsActiveEQ(true)). + SetIsActive(false). + Save(ctx); err != nil { + return nil, err + } + if _, err = tx.BackupSourceConfig.UpdateOneID(entity.ID). + SetIsActive(true). 
+ Save(ctx); err != nil { + return nil, err + } } if err = tx.Commit(); err != nil { return nil, err } - return s.GetConfig(ctx) + return s.GetSourceProfile(ctx, enumType.String(), normalizedProfileID) } func (s *Store) CreateBackupJob(ctx context.Context, input CreateBackupJobInput) (*ent.BackupJob, bool, error) { + if err := s.ensureDefaults(ctx); err != nil { + return nil, false, err + } if strings.TrimSpace(input.TriggeredBy) == "" { input.TriggeredBy = "admin:unknown" } + input.BackupType = strings.TrimSpace(input.BackupType) + input.S3ProfileID = strings.TrimSpace(input.S3ProfileID) + input.PostgresID = strings.TrimSpace(input.PostgresID) + input.RedisID = strings.TrimSpace(input.RedisID) + + if backupTypeNeedsPostgres(input.BackupType) { + resolvedID, resolveErr := s.resolveSourceProfileID(ctx, backupsourceconfig.SourceTypePostgres.String(), input.PostgresID) + if resolveErr != nil { + return nil, false, resolveErr + } + input.PostgresID = resolvedID + } + if backupTypeNeedsRedis(input.BackupType) { + resolvedID, resolveErr := s.resolveSourceProfileID(ctx, backupsourceconfig.SourceTypeRedis.String(), input.RedisID) + if resolveErr != nil { + return nil, false, resolveErr + } + input.RedisID = resolvedID + } + + if input.S3ProfileID != "" { + if _, err := s.client.BackupS3Config.Query(). + Where(backups3config.ProfileIDEQ(input.S3ProfileID)). + First(ctx); err != nil { + return nil, false, err + } + } now := time.Now() if strings.TrimSpace(input.IdempotencyKey) != "" { @@ -299,6 +946,15 @@ func (s *Store) CreateBackupJob(ctx context.Context, input CreateBackupJobInput) SetStatus(backupjob.StatusQueued). SetTriggeredBy(strings.TrimSpace(input.TriggeredBy)). 
SetUploadToS3(input.UploadToS3) + if input.S3ProfileID != "" { + builder.SetS3ProfileID(input.S3ProfileID) + } + if input.PostgresID != "" { + builder.SetPostgresProfileID(input.PostgresID) + } + if input.RedisID != "" { + builder.SetRedisProfileID(input.RedisID) + } if strings.TrimSpace(input.IdempotencyKey) != "" { builder.SetIdempotencyKey(strings.TrimSpace(input.IdempotencyKey)) } @@ -508,8 +1164,112 @@ func (s *Store) GetBackupJob(ctx context.Context, jobID string) (*ent.BackupJob, return s.client.BackupJob.Query().Where(backupjob.JobIDEQ(strings.TrimSpace(jobID))).First(ctx) } -func (s *Store) getSourceConfig(ctx context.Context, sourceType backupsourceconfig.SourceType) (*ent.BackupSourceConfig, error) { - return s.client.BackupSourceConfig.Query().Where(backupsourceconfig.SourceTypeEQ(sourceType)).First(ctx) +func (s *Store) getSourceConfigEntity(ctx context.Context, sourceType, profileID string) (*ent.BackupSourceConfig, error) { + enumType, err := parseSourceType(sourceType) + if err != nil { + return nil, err + } + normalizedProfileID := strings.TrimSpace(profileID) + if normalizedProfileID == "" { + return nil, ErrSourceIDRequired + } + return s.client.BackupSourceConfig.Query(). + Where( + backupsourceconfig.SourceTypeEQ(enumType), + backupsourceconfig.ProfileIDEQ(normalizedProfileID), + ). + First(ctx) +} + +func (s *Store) getActiveSourceConfigEntity(ctx context.Context, sourceType string) (*ent.BackupSourceConfig, error) { + enumType, err := parseSourceType(sourceType) + if err != nil { + return nil, err + } + entity, err := s.client.BackupSourceConfig.Query(). + Where( + backupsourceconfig.SourceTypeEQ(enumType), + backupsourceconfig.IsActiveEQ(true), + ). + Order(ent.Asc(backupsourceconfig.FieldID)). + First(ctx) + if err == nil { + return entity, nil + } + if !ent.IsNotFound(err) { + return nil, err + } + return s.client.BackupSourceConfig.Query(). + Where(backupsourceconfig.SourceTypeEQ(enumType)). + Order(ent.Asc(backupsourceconfig.FieldID)). 
+ First(ctx) +} + +func (s *Store) getActiveS3ConfigEntity(ctx context.Context) (*ent.BackupS3Config, error) { + entity, err := s.client.BackupS3Config.Query(). + Where(backups3config.IsActiveEQ(true)). + Order(ent.Asc(backups3config.FieldID)). + First(ctx) + if err == nil { + return entity, nil + } + if !ent.IsNotFound(err) { + return nil, err + } + return s.client.BackupS3Config.Query().Order(ent.Asc(backups3config.FieldID)).First(ctx) +} + +func toS3ProfileSnapshot(entity *ent.BackupS3Config) *S3ProfileSnapshot { + if entity == nil { + return &S3ProfileSnapshot{} + } + return &S3ProfileSnapshot{ + ProfileID: entity.ProfileID, + Name: entity.Name, + IsActive: entity.IsActive, + S3: S3Config{ + Enabled: entity.Enabled, + Endpoint: entity.Endpoint, + Region: entity.Region, + Bucket: entity.Bucket, + AccessKeyID: entity.AccessKeyID, + SecretAccessKey: entity.SecretAccessKeyEncrypted, + Prefix: entity.Prefix, + ForcePathStyle: entity.ForcePathStyle, + UseSSL: entity.UseSsl, + }, + SecretAccessKeyConfigured: strings.TrimSpace(entity.SecretAccessKeyEncrypted) != "", + CreatedAt: entity.CreatedAt, + UpdatedAt: entity.UpdatedAt, + } +} + +func toSourceProfileSnapshot(entity *ent.BackupSourceConfig) *SourceProfileSnapshot { + if entity == nil { + return &SourceProfileSnapshot{} + } + config := SourceConfig{ + Host: entity.Host, + Port: int32(nillableInt(entity.Port)), + User: entity.Username, + Username: entity.Username, + Password: entity.PasswordEncrypted, + Database: entity.Database, + SSLMode: entity.SslMode, + Addr: entity.Addr, + DB: int32(nillableInt(entity.RedisDb)), + ContainerName: entity.ContainerName, + } + return &SourceProfileSnapshot{ + SourceType: entity.SourceType.String(), + ProfileID: entity.ProfileID, + Name: entity.Name, + IsActive: entity.IsActive, + Config: config, + PasswordConfigured: strings.TrimSpace(entity.PasswordEncrypted) != "", + CreatedAt: entity.CreatedAt, + UpdatedAt: entity.UpdatedAt, + } } func (s *Store) appendJobEventByEntityID(ctx 
context.Context, backupJobID int, level backupjobevent.Level, eventType, message, payload string) error { @@ -525,31 +1285,162 @@ func (s *Store) appendJobEventByEntityID(ctx context.Context, backupJobID int, l return err } -func (s *Store) upsertSourceConfigTx(ctx context.Context, tx *ent.Tx, sourceType backupsourceconfig.SourceType, cfg SourceConfig) error { - entity, err := tx.BackupSourceConfig.Query().Where(backupsourceconfig.SourceTypeEQ(sourceType)).First(ctx) +func (s *Store) updateSourceConfigTx(ctx context.Context, tx *ent.Tx, sourceType, profileID string, cfg SourceConfig) error { + entity, enumType, err := s.resolveSourceEntityTx(ctx, tx, sourceType, profileID) if err != nil { return err } - updater := tx.BackupSourceConfig.UpdateOneID(entity.ID). - SetHost(strings.TrimSpace(cfg.Host)). - SetPort(int(cfg.Port)). - SetUsername(strings.TrimSpace(cfg.User)). - SetDatabase(strings.TrimSpace(cfg.Database)). - SetSslMode(strings.TrimSpace(cfg.SSLMode)). - SetAddr(strings.TrimSpace(cfg.Addr)). - SetRedisDb(int(cfg.DB)). - SetContainerName(strings.TrimSpace(cfg.ContainerName)) - if strings.TrimSpace(cfg.Username) != "" { - updater.SetUsername(strings.TrimSpace(cfg.Username)) - } - if strings.TrimSpace(cfg.Password) != "" { - updater.SetPasswordEncrypted(strings.TrimSpace(cfg.Password)) - } + updater := tx.BackupSourceConfig.UpdateOneID(entity.ID) + applySourceConfigUpdate(updater, enumType, cfg) _, err = updater.Save(ctx) return err } +func (s *Store) resolveSourceEntityTx( + ctx context.Context, + tx *ent.Tx, + sourceType, + profileID string, +) (*ent.BackupSourceConfig, backupsourceconfig.SourceType, error) { + enumType, err := parseSourceType(sourceType) + if err != nil { + return nil, "", err + } + normalizedProfileID := strings.TrimSpace(profileID) + if normalizedProfileID != "" { + entity, queryErr := tx.BackupSourceConfig.Query(). + Where( + backupsourceconfig.SourceTypeEQ(enumType), + backupsourceconfig.ProfileIDEQ(normalizedProfileID), + ). 
+ First(ctx) + return entity, enumType, queryErr + } + + entity, queryErr := tx.BackupSourceConfig.Query(). + Where( + backupsourceconfig.SourceTypeEQ(enumType), + backupsourceconfig.IsActiveEQ(true), + ). + Order(ent.Asc(backupsourceconfig.FieldID)). + First(ctx) + if queryErr == nil { + return entity, enumType, nil + } + if !ent.IsNotFound(queryErr) { + return nil, "", queryErr + } + entity, queryErr = tx.BackupSourceConfig.Query(). + Where(backupsourceconfig.SourceTypeEQ(enumType)). + Order(ent.Asc(backupsourceconfig.FieldID)). + First(ctx) + return entity, enumType, queryErr +} + +func applySourceConfigCreate(builder *ent.BackupSourceConfigCreate, sourceType backupsourceconfig.SourceType, cfg SourceConfig) { + applySourceConfigCore(sourceType, cfg, func(host, username, database, sslMode, addr, containerName string, port, redisDB *int) { + builder. + SetHost(host). + SetUsername(username). + SetDatabase(database). + SetSslMode(sslMode). + SetAddr(addr). + SetContainerName(containerName) + if port != nil { + builder.SetPort(*port) + } + if redisDB != nil { + builder.SetRedisDb(*redisDB) + } + }) + if password := strings.TrimSpace(cfg.Password); password != "" { + builder.SetPasswordEncrypted(password) + } +} + +func applySourceConfigUpdate(builder *ent.BackupSourceConfigUpdateOne, sourceType backupsourceconfig.SourceType, cfg SourceConfig) { + applySourceConfigCore(sourceType, cfg, func(host, username, database, sslMode, addr, containerName string, port, redisDB *int) { + builder. + SetHost(host). + SetUsername(username). + SetDatabase(database). + SetSslMode(sslMode). + SetAddr(addr). 
+ SetContainerName(containerName) + if port != nil { + builder.SetPort(*port) + } else { + builder.ClearPort() + } + if redisDB != nil { + builder.SetRedisDb(*redisDB) + } else { + builder.ClearRedisDb() + } + }) + if password := strings.TrimSpace(cfg.Password); password != "" { + builder.SetPasswordEncrypted(password) + } +} + +func applySourceConfigCore( + sourceType backupsourceconfig.SourceType, + cfg SourceConfig, + apply func(host, username, database, sslMode, addr, containerName string, port, redisDB *int), +) { + host := strings.TrimSpace(cfg.Host) + username := strings.TrimSpace(cfg.User) + if username == "" { + username = strings.TrimSpace(cfg.Username) + } + database := strings.TrimSpace(cfg.Database) + sslMode := strings.TrimSpace(cfg.SSLMode) + addr := strings.TrimSpace(cfg.Addr) + containerName := strings.TrimSpace(cfg.ContainerName) + + var portPtr *int + if cfg.Port > 0 { + portValue := int(cfg.Port) + portPtr = &portValue + } + var redisDBPtr *int + if cfg.DB >= 0 { + dbValue := int(cfg.DB) + redisDBPtr = &dbValue + } + + switch sourceType { + case backupsourceconfig.SourceTypePostgres: + if host == "" { + host = "127.0.0.1" + } + if username == "" { + username = "postgres" + } + if database == "" { + database = "sub2api" + } + if sslMode == "" { + sslMode = "disable" + } + if portPtr == nil { + portValue := 5432 + portPtr = &portValue + } + case backupsourceconfig.SourceTypeRedis: + if addr == "" { + addr = "127.0.0.1:6379" + } + if redisDBPtr == nil { + dbValue := 0 + redisDBPtr = &dbValue + } + } + + apply(host, username, database, sslMode, addr, containerName, portPtr, redisDBPtr) +} + func (s *Store) ensureDefaults(ctx context.Context) error { if _, err := s.client.BackupSetting.Query().First(ctx); err != nil { if !ent.IsNotFound(err) { @@ -566,42 +1457,33 @@ func (s *Store) ensureDefaults(ctx context.Context) error { } } - if _, err := s.getSourceConfig(ctx, backupsourceconfig.SourceTypePostgres); err != nil { - if !ent.IsNotFound(err) { - 
return err - } - if _, err := s.client.BackupSourceConfig.Create(). - SetSourceType(backupsourceconfig.SourceTypePostgres). - SetHost("127.0.0.1"). - SetPort(5432). - SetUsername("postgres"). - SetDatabase("sub2api"). - SetSslMode("disable"). - SetContainerName(""). - Save(ctx); err != nil { - return err - } + if err := s.ensureSourceDefaultsByType(ctx, backupsourceconfig.SourceTypePostgres, "默认 PostgreSQL", SourceConfig{ + Host: "127.0.0.1", + Port: 5432, + User: "postgres", + Database: "sub2api", + SSLMode: "disable", + }); err != nil { + return err } - - if _, err := s.getSourceConfig(ctx, backupsourceconfig.SourceTypeRedis); err != nil { - if !ent.IsNotFound(err) { - return err - } - if _, err := s.client.BackupSourceConfig.Create(). - SetSourceType(backupsourceconfig.SourceTypeRedis). - SetAddr("127.0.0.1:6379"). - SetRedisDb(0). - SetContainerName(""). - Save(ctx); err != nil { - return err - } + if err := s.ensureSourceDefaultsByType(ctx, backupsourceconfig.SourceTypeRedis, "默认 Redis", SourceConfig{ + Addr: "127.0.0.1:6379", + DB: 0, + }); err != nil { + return err } - if _, err := s.client.BackupS3Config.Query().First(ctx); err != nil { - if !ent.IsNotFound(err) { - return err - } - if _, err := s.client.BackupS3Config.Create(). + profiles, err := s.client.BackupS3Config.Query(). + Order(ent.Asc(backups3config.FieldID)). + All(ctx) + if err != nil { + return err + } + if len(profiles) == 0 { + _, err = s.client.BackupS3Config.Create(). + SetProfileID(defaultS3ProfileID). + SetName("默认账号"). + SetIsActive(true). SetEnabled(false). SetEndpoint(""). SetRegion(""). @@ -610,13 +1492,255 @@ func (s *Store) ensureDefaults(ctx context.Context) error { SetPrefix(""). SetForcePathStyle(false). SetUseSsl(true). 
- Save(ctx); err != nil { + Save(ctx) + return err + } + + used := make(map[string]struct{}, len(profiles)) + normalizedIDs := make([]string, len(profiles)) + normalizedNames := make([]string, len(profiles)) + activeIndex := -1 + needFix := false + + for idx, profile := range profiles { + profileID := strings.TrimSpace(profile.ProfileID) + if profileID == "" { + needFix = true + if idx == 0 { + profileID = defaultS3ProfileID + } else { + profileID = fmt.Sprintf("profile-%d", profile.ID) + } + } + base := profileID + seq := 2 + for { + if _, exists := used[profileID]; !exists { + break + } + needFix = true + profileID = fmt.Sprintf("%s-%d", base, seq) + seq++ + } + used[profileID] = struct{}{} + normalizedIDs[idx] = profileID + + name := strings.TrimSpace(profile.Name) + if name == "" { + needFix = true + name = profileID + } + normalizedNames[idx] = name + + if profile.IsActive { + if activeIndex == -1 { + activeIndex = idx + } else { + needFix = true + } + } + } + if activeIndex == -1 { + needFix = true + activeIndex = 0 + } + if !needFix { + return nil + } + + tx, err := s.client.Tx(ctx) + if err != nil { + return err + } + for idx, profile := range profiles { + changed := false + updater := tx.BackupS3Config.UpdateOneID(profile.ID) + + if profile.ProfileID != normalizedIDs[idx] { + updater.SetProfileID(normalizedIDs[idx]) + changed = true + } + if strings.TrimSpace(profile.Name) != normalizedNames[idx] { + updater.SetName(normalizedNames[idx]) + changed = true + } + shouldActive := idx == activeIndex + if profile.IsActive != shouldActive { + updater.SetIsActive(shouldActive) + changed = true + } + if !changed { + continue + } + if _, err = updater.Save(ctx); err != nil { + _ = tx.Rollback() + return err + } + } + if err = tx.Commit(); err != nil { + return err + } + return nil +} + +func (s *Store) ensureSourceDefaultsByType( + ctx context.Context, + sourceType backupsourceconfig.SourceType, + defaultName string, + defaultCfg SourceConfig, +) error { + items, 
err := s.client.BackupSourceConfig.Query(). + Where(backupsourceconfig.SourceTypeEQ(sourceType)). + Order(ent.Asc(backupsourceconfig.FieldID)). + All(ctx) + if err != nil { + return err + } + + if len(items) == 0 { + builder := s.client.BackupSourceConfig.Create(). + SetSourceType(sourceType). + SetProfileID(defaultSourceID). + SetName(defaultName). + SetIsActive(true) + applySourceConfigCreate(builder, sourceType, defaultCfg) + _, err = builder.Save(ctx) + return err + } + + used := make(map[string]struct{}, len(items)) + normalizedIDs := make([]string, len(items)) + normalizedNames := make([]string, len(items)) + activeIndex := -1 + needFix := false + + for idx, item := range items { + profileID := strings.TrimSpace(item.ProfileID) + if profileID == "" { + needFix = true + if idx == 0 { + profileID = defaultSourceID + } else { + profileID = fmt.Sprintf("profile-%d", item.ID) + } + } + base := profileID + seq := 2 + for { + if _, exists := used[profileID]; !exists { + break + } + needFix = true + profileID = fmt.Sprintf("%s-%d", base, seq) + seq++ + } + used[profileID] = struct{}{} + normalizedIDs[idx] = profileID + + name := strings.TrimSpace(item.Name) + if name == "" { + needFix = true + name = profileID + } + normalizedNames[idx] = name + + if item.IsActive { + if activeIndex == -1 { + activeIndex = idx + } else { + needFix = true + } + } + } + if activeIndex == -1 { + needFix = true + activeIndex = 0 + } + if !needFix { + return nil + } + + tx, err := s.client.Tx(ctx) + if err != nil { + return err + } + for idx, item := range items { + changed := false + updater := tx.BackupSourceConfig.UpdateOneID(item.ID) + + if item.ProfileID != normalizedIDs[idx] { + updater.SetProfileID(normalizedIDs[idx]) + changed = true + } + if strings.TrimSpace(item.Name) != normalizedNames[idx] { + updater.SetName(normalizedNames[idx]) + changed = true + } + shouldActive := idx == activeIndex + if item.IsActive != shouldActive { + updater.SetIsActive(shouldActive) + changed = true 
+ } + if !changed { + continue + } + if _, err = updater.Save(ctx); err != nil { + _ = tx.Rollback() return err } } + if err = tx.Commit(); err != nil { + return err + } return nil } +func parseSourceType(sourceType string) (backupsourceconfig.SourceType, error) { + switch strings.TrimSpace(sourceType) { + case backupsourceconfig.SourceTypePostgres.String(): + return backupsourceconfig.SourceTypePostgres, nil + case backupsourceconfig.SourceTypeRedis.String(): + return backupsourceconfig.SourceTypeRedis, nil + default: + return "", ErrSourceTypeInvalid + } +} + +func (s *Store) resolveSourceProfileID(ctx context.Context, sourceType, requestedProfileID string) (string, error) { + requestedProfileID = strings.TrimSpace(requestedProfileID) + if requestedProfileID != "" { + entity, err := s.getSourceConfigEntity(ctx, sourceType, requestedProfileID) + if err != nil { + return "", err + } + return entity.ProfileID, nil + } + + entity, err := s.getActiveSourceConfigEntity(ctx, sourceType) + if err != nil { + return "", err + } + return strings.TrimSpace(entity.ProfileID), nil +} + +func backupTypeNeedsPostgres(backupType string) bool { + switch strings.TrimSpace(backupType) { + case backupjob.BackupTypePostgres.String(), backupjob.BackupTypeFull.String(): + return true + default: + return false + } +} + +func backupTypeNeedsRedis(backupType string) bool { + switch strings.TrimSpace(backupType) { + case backupjob.BackupTypeRedis.String(), backupjob.BackupTypeFull.String(): + return true + default: + return false + } +} + func normalizeSQLitePath(sqlitePath string) string { path := strings.TrimSpace(sqlitePath) if path == "" { diff --git a/backup/internal/store/entstore/store_test.go b/backup/internal/store/entstore/store_test.go index c5217ee58..a44c32333 100644 --- a/backup/internal/store/entstore/store_test.go +++ b/backup/internal/store/entstore/store_test.go @@ -2,6 +2,7 @@ package entstore import ( "context" + "errors" "path/filepath" "testing" "time" @@ -137,6 
+138,187 @@ func TestStore_UpdateConfig_KeepSecretWhenEmpty(t *testing.T) { require.Equal(t, "s3-secret", finalCfg.S3.SecretAccessKey) } +func TestStore_S3ProfilesLifecycle(t *testing.T) { + store := openTestStore(t) + ctx := context.Background() + + initialProfiles, err := store.ListS3Profiles(ctx) + require.NoError(t, err) + require.Len(t, initialProfiles, 1) + require.Equal(t, defaultS3ProfileID, initialProfiles[0].ProfileID) + require.True(t, initialProfiles[0].IsActive) + + created, err := store.CreateS3Profile(ctx, CreateS3ProfileInput{ + ProfileID: "archive", + Name: "归档账号", + S3: S3Config{ + Enabled: true, + Region: "us-east-1", + Bucket: "archive-bucket", + AccessKeyID: "archive-ak", + SecretAccessKey: "archive-sk", + UseSSL: true, + }, + SetActive: false, + }) + require.NoError(t, err) + require.Equal(t, "archive", created.ProfileID) + require.False(t, created.IsActive) + require.True(t, created.SecretAccessKeyConfigured) + + updated, err := store.UpdateS3Profile(ctx, UpdateS3ProfileInput{ + ProfileID: "archive", + Name: "归档账号-更新", + S3: S3Config{ + Enabled: true, + Region: "us-east-1", + Bucket: "archive-bucket-updated", + AccessKeyID: "archive-ak-2", + SecretAccessKey: "", + UseSSL: true, + }, + }) + require.NoError(t, err) + require.Equal(t, "归档账号-更新", updated.Name) + require.Equal(t, "archive-ak-2", updated.S3.AccessKeyID) + require.Equal(t, "archive-sk", updated.S3.SecretAccessKey) + + active, err := store.SetActiveS3Profile(ctx, "archive") + require.NoError(t, err) + require.True(t, active.IsActive) + + cfg, err := store.GetConfig(ctx) + require.NoError(t, err) + require.Equal(t, "archive", cfg.ActiveS3ProfileID) + require.Equal(t, "archive-bucket-updated", cfg.S3.Bucket) + + err = store.DeleteS3Profile(ctx, "archive") + require.Error(t, err) + require.True(t, errors.Is(err, ErrActiveS3Profile)) + + _, err = store.SetActiveS3Profile(ctx, defaultS3ProfileID) + require.NoError(t, err) + require.NoError(t, store.DeleteS3Profile(ctx, "archive")) +} + 
+func TestStore_DeleteS3ProfileInUse(t *testing.T) { + store := openTestStore(t) + ctx := context.Background() + + _, err := store.CreateS3Profile(ctx, CreateS3ProfileInput{ + ProfileID: "for-job", + Name: "任务账号", + S3: S3Config{ + Enabled: true, + Region: "us-east-1", + Bucket: "job-bucket", + UseSSL: true, + }, + }) + require.NoError(t, err) + + _, _, err = store.CreateBackupJob(ctx, CreateBackupJobInput{ + BackupType: backupjob.BackupTypePostgres.String(), + UploadToS3: true, + TriggeredBy: "admin:9", + S3ProfileID: "for-job", + }) + require.NoError(t, err) + + err = store.DeleteS3Profile(ctx, "for-job") + require.Error(t, err) + require.True(t, errors.Is(err, ErrS3ProfileInUse)) +} + +func TestStore_SourceProfilesLifecycle(t *testing.T) { + store := openTestStore(t) + ctx := context.Background() + + initialPG, err := store.ListSourceProfiles(ctx, "postgres") + require.NoError(t, err) + require.Len(t, initialPG, 1) + require.Equal(t, defaultSourceID, initialPG[0].ProfileID) + require.True(t, initialPG[0].IsActive) + + created, err := store.CreateSourceProfile(ctx, CreateSourceProfileInput{ + SourceType: "postgres", + ProfileID: "pg-reporting", + Name: "报表库", + Config: SourceConfig{ + Host: "10.0.0.10", + Port: 15432, + User: "report_user", + Password: "secret", + Database: "reporting", + SSLMode: "require", + }, + SetActive: false, + }) + require.NoError(t, err) + require.Equal(t, "pg-reporting", created.ProfileID) + require.False(t, created.IsActive) + require.True(t, created.PasswordConfigured) + + active, err := store.SetActiveSourceProfile(ctx, "postgres", "pg-reporting") + require.NoError(t, err) + require.True(t, active.IsActive) + + cfg, err := store.GetConfig(ctx) + require.NoError(t, err) + require.Equal(t, "pg-reporting", cfg.ActivePostgresID) + require.Equal(t, "10.0.0.10", cfg.Postgres.Host) + require.Equal(t, int32(15432), cfg.Postgres.Port) + + err = store.DeleteSourceProfile(ctx, "postgres", "pg-reporting") + require.Error(t, err) + 
require.True(t, errors.Is(err, ErrSourceActive)) + + _, err = store.SetActiveSourceProfile(ctx, "postgres", defaultSourceID) + require.NoError(t, err) + require.NoError(t, store.DeleteSourceProfile(ctx, "postgres", "pg-reporting")) +} + +func TestStore_CreateBackupJob_WithSelectedSourceProfiles(t *testing.T) { + store := openTestStore(t) + ctx := context.Background() + + _, err := store.CreateSourceProfile(ctx, CreateSourceProfileInput{ + SourceType: "postgres", + ProfileID: "pg-custom", + Name: "自定义PG", + Config: SourceConfig{ + Host: "127.0.0.2", + Port: 6432, + User: "custom_user", + Database: "custom_db", + SSLMode: "disable", + }, + }) + require.NoError(t, err) + + _, err = store.CreateSourceProfile(ctx, CreateSourceProfileInput{ + SourceType: "redis", + ProfileID: "redis-custom", + Name: "自定义Redis", + Config: SourceConfig{ + Addr: "127.0.0.3:6380", + DB: 5, + }, + }) + require.NoError(t, err) + + job, created, err := store.CreateBackupJob(ctx, CreateBackupJobInput{ + BackupType: backupjob.BackupTypeFull.String(), + TriggeredBy: "admin:10", + PostgresID: "pg-custom", + RedisID: "redis-custom", + }) + require.NoError(t, err) + require.True(t, created) + require.Equal(t, "pg-custom", job.PostgresProfileID) + require.Equal(t, "redis-custom", job.RedisProfileID) +} + func openTestStore(t *testing.T) *Store { t.Helper() diff --git a/backup/proto/backup/v1/backup.pb.go b/backup/proto/backup/v1/backup.pb.go index 07a5da1d5..30e288ef5 100644 --- a/backup/proto/backup/v1/backup.pb.go +++ b/backup/proto/backup/v1/backup.pb.go @@ -342,34 +342,1194 @@ func (x *S3Config) GetUseSsl() bool { } type BackupConfig struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceMode string `protobuf:"bytes,1,opt,name=source_mode,json=sourceMode,proto3" json:"source_mode,omitempty"` + BackupRoot string `protobuf:"bytes,2,opt,name=backup_root,json=backupRoot,proto3" json:"backup_root,omitempty"` + SqlitePath string 
`protobuf:"bytes,3,opt,name=sqlite_path,json=sqlitePath,proto3" json:"sqlite_path,omitempty"` + RetentionDays int32 `protobuf:"varint,4,opt,name=retention_days,json=retentionDays,proto3" json:"retention_days,omitempty"` + KeepLast int32 `protobuf:"varint,5,opt,name=keep_last,json=keepLast,proto3" json:"keep_last,omitempty"` + Postgres *SourceConfig `protobuf:"bytes,6,opt,name=postgres,proto3" json:"postgres,omitempty"` + Redis *SourceConfig `protobuf:"bytes,7,opt,name=redis,proto3" json:"redis,omitempty"` + S3 *S3Config `protobuf:"bytes,8,opt,name=s3,proto3" json:"s3,omitempty"` + ActiveS3ProfileId string `protobuf:"bytes,9,opt,name=active_s3_profile_id,json=activeS3ProfileId,proto3" json:"active_s3_profile_id,omitempty"` + ActivePostgresProfileId string `protobuf:"bytes,10,opt,name=active_postgres_profile_id,json=activePostgresProfileId,proto3" json:"active_postgres_profile_id,omitempty"` + ActiveRedisProfileId string `protobuf:"bytes,11,opt,name=active_redis_profile_id,json=activeRedisProfileId,proto3" json:"active_redis_profile_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *BackupConfig) Reset() { + *x = BackupConfig{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *BackupConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*BackupConfig) ProtoMessage() {} + +func (x *BackupConfig) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use BackupConfig.ProtoReflect.Descriptor instead. 
+func (*BackupConfig) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{4} +} + +func (x *BackupConfig) GetSourceMode() string { + if x != nil { + return x.SourceMode + } + return "" +} + +func (x *BackupConfig) GetBackupRoot() string { + if x != nil { + return x.BackupRoot + } + return "" +} + +func (x *BackupConfig) GetSqlitePath() string { + if x != nil { + return x.SqlitePath + } + return "" +} + +func (x *BackupConfig) GetRetentionDays() int32 { + if x != nil { + return x.RetentionDays + } + return 0 +} + +func (x *BackupConfig) GetKeepLast() int32 { + if x != nil { + return x.KeepLast + } + return 0 +} + +func (x *BackupConfig) GetPostgres() *SourceConfig { + if x != nil { + return x.Postgres + } + return nil +} + +func (x *BackupConfig) GetRedis() *SourceConfig { + if x != nil { + return x.Redis + } + return nil +} + +func (x *BackupConfig) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +func (x *BackupConfig) GetActiveS3ProfileId() string { + if x != nil { + return x.ActiveS3ProfileId + } + return "" +} + +func (x *BackupConfig) GetActivePostgresProfileId() string { + if x != nil { + return x.ActivePostgresProfileId + } + return "" +} + +func (x *BackupConfig) GetActiveRedisProfileId() string { + if x != nil { + return x.ActiveRedisProfileId + } + return "" +} + +type GetConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConfigRequest) Reset() { + *x = GetConfigRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConfigRequest) ProtoMessage() {} + +func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[5] + if x != 
nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConfigRequest.ProtoReflect.Descriptor instead. +func (*GetConfigRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{5} +} + +type GetConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GetConfigResponse) Reset() { + *x = GetConfigResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GetConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GetConfigResponse) ProtoMessage() {} + +func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GetConfigResponse.ProtoReflect.Descriptor instead. 
+func (*GetConfigResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{6} +} + +func (x *GetConfigResponse) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpdateConfigRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateConfigRequest) Reset() { + *x = UpdateConfigRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateConfigRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateConfigRequest) ProtoMessage() {} + +func (x *UpdateConfigRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateConfigRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{7} +} + +func (x *UpdateConfigRequest) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpdateConfigResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateConfigResponse) Reset() { + *x = UpdateConfigResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateConfigResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateConfigResponse) ProtoMessage() {} + +func (x *UpdateConfigResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateConfigResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{8} +} + +func (x *UpdateConfigResponse) GetConfig() *BackupConfig { + if x != nil { + return x.Config + } + return nil +} + +type SourceProfile struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + IsActive bool `protobuf:"varint,4,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"` + Config *SourceConfig `protobuf:"bytes,5,opt,name=config,proto3" json:"config,omitempty"` + PasswordConfigured bool `protobuf:"varint,6,opt,name=password_configured,json=passwordConfigured,proto3" json:"password_configured,omitempty"` + CreatedAt string `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + UpdatedAt string `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SourceProfile) Reset() { + *x = SourceProfile{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SourceProfile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SourceProfile) ProtoMessage() {} + +func (x *SourceProfile) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SourceProfile.ProtoReflect.Descriptor instead. 
+func (*SourceProfile) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{9} +} + +func (x *SourceProfile) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *SourceProfile) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +func (x *SourceProfile) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *SourceProfile) GetIsActive() bool { + if x != nil { + return x.IsActive + } + return false +} + +func (x *SourceProfile) GetConfig() *SourceConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *SourceProfile) GetPasswordConfigured() bool { + if x != nil { + return x.PasswordConfigured + } + return false +} + +func (x *SourceProfile) GetCreatedAt() string { + if x != nil { + return x.CreatedAt + } + return "" +} + +func (x *SourceProfile) GetUpdatedAt() string { + if x != nil { + return x.UpdatedAt + } + return "" +} + +type ListSourceProfilesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListSourceProfilesRequest) Reset() { + *x = ListSourceProfilesRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[10] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSourceProfilesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSourceProfilesRequest) ProtoMessage() {} + +func (x *ListSourceProfilesRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[10] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
ListSourceProfilesRequest.ProtoReflect.Descriptor instead. +func (*ListSourceProfilesRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{10} +} + +func (x *ListSourceProfilesRequest) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +type ListSourceProfilesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Items []*SourceProfile `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListSourceProfilesResponse) Reset() { + *x = ListSourceProfilesResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[11] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListSourceProfilesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListSourceProfilesResponse) ProtoMessage() {} + +func (x *ListSourceProfilesResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[11] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListSourceProfilesResponse.ProtoReflect.Descriptor instead. 
+func (*ListSourceProfilesResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{11} +} + +func (x *ListSourceProfilesResponse) GetItems() []*SourceProfile { + if x != nil { + return x.Items + } + return nil +} + +type CreateSourceProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Config *SourceConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + SetActive bool `protobuf:"varint,5,opt,name=set_active,json=setActive,proto3" json:"set_active,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSourceProfileRequest) Reset() { + *x = CreateSourceProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[12] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSourceProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSourceProfileRequest) ProtoMessage() {} + +func (x *CreateSourceProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[12] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSourceProfileRequest.ProtoReflect.Descriptor instead. 
+func (*CreateSourceProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{12} +} + +func (x *CreateSourceProfileRequest) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *CreateSourceProfileRequest) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +func (x *CreateSourceProfileRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *CreateSourceProfileRequest) GetConfig() *SourceConfig { + if x != nil { + return x.Config + } + return nil +} + +func (x *CreateSourceProfileRequest) GetSetActive() bool { + if x != nil { + return x.SetActive + } + return false +} + +type CreateSourceProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Profile *SourceProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *CreateSourceProfileResponse) Reset() { + *x = CreateSourceProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *CreateSourceProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateSourceProfileResponse) ProtoMessage() {} + +func (x *CreateSourceProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[13] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use CreateSourceProfileResponse.ProtoReflect.Descriptor instead. 
+func (*CreateSourceProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{13} +} + +func (x *CreateSourceProfileResponse) GetProfile() *SourceProfile { + if x != nil { + return x.Profile + } + return nil +} + +type UpdateSourceProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Config *SourceConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateSourceProfileRequest) Reset() { + *x = UpdateSourceProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[14] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateSourceProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSourceProfileRequest) ProtoMessage() {} + +func (x *UpdateSourceProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[14] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSourceProfileRequest.ProtoReflect.Descriptor instead. 
+func (*UpdateSourceProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{14} +} + +func (x *UpdateSourceProfileRequest) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *UpdateSourceProfileRequest) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +func (x *UpdateSourceProfileRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateSourceProfileRequest) GetConfig() *SourceConfig { + if x != nil { + return x.Config + } + return nil +} + +type UpdateSourceProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Profile *SourceProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *UpdateSourceProfileResponse) Reset() { + *x = UpdateSourceProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[15] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *UpdateSourceProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*UpdateSourceProfileResponse) ProtoMessage() {} + +func (x *UpdateSourceProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[15] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use UpdateSourceProfileResponse.ProtoReflect.Descriptor instead. 
+func (*UpdateSourceProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{15} +} + +func (x *UpdateSourceProfileResponse) GetProfile() *SourceProfile { + if x != nil { + return x.Profile + } + return nil +} + +type DeleteSourceProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteSourceProfileRequest) Reset() { + *x = DeleteSourceProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[16] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteSourceProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSourceProfileRequest) ProtoMessage() {} + +func (x *DeleteSourceProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[16] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteSourceProfileRequest.ProtoReflect.Descriptor instead. 
+func (*DeleteSourceProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{16} +} + +func (x *DeleteSourceProfileRequest) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *DeleteSourceProfileRequest) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +type DeleteSourceProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeleteSourceProfileResponse) Reset() { + *x = DeleteSourceProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[17] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeleteSourceProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeleteSourceProfileResponse) ProtoMessage() {} + +func (x *DeleteSourceProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[17] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeleteSourceProfileResponse.ProtoReflect.Descriptor instead. 
+func (*DeleteSourceProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{17} +} + +type SetActiveSourceProfileRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` + ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetActiveSourceProfileRequest) Reset() { + *x = SetActiveSourceProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[18] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetActiveSourceProfileRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetActiveSourceProfileRequest) ProtoMessage() {} + +func (x *SetActiveSourceProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[18] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetActiveSourceProfileRequest.ProtoReflect.Descriptor instead. 
+func (*SetActiveSourceProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{18} +} + +func (x *SetActiveSourceProfileRequest) GetSourceType() string { + if x != nil { + return x.SourceType + } + return "" +} + +func (x *SetActiveSourceProfileRequest) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +type SetActiveSourceProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Profile *SourceProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SetActiveSourceProfileResponse) Reset() { + *x = SetActiveSourceProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[19] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SetActiveSourceProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SetActiveSourceProfileResponse) ProtoMessage() {} + +func (x *SetActiveSourceProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[19] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SetActiveSourceProfileResponse.ProtoReflect.Descriptor instead. 
+func (*SetActiveSourceProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{19} +} + +func (x *SetActiveSourceProfileResponse) GetProfile() *SourceProfile { + if x != nil { + return x.Profile + } + return nil +} + +type ValidateS3Request struct { + state protoimpl.MessageState `protogen:"open.v1"` + S3 *S3Config `protobuf:"bytes,1,opt,name=s3,proto3" json:"s3,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateS3Request) Reset() { + *x = ValidateS3Request{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[20] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateS3Request) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateS3Request) ProtoMessage() {} + +func (x *ValidateS3Request) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[20] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateS3Request.ProtoReflect.Descriptor instead. 
+func (*ValidateS3Request) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{20} +} + +func (x *ValidateS3Request) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +type ValidateS3Response struct { + state protoimpl.MessageState `protogen:"open.v1"` + Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ValidateS3Response) Reset() { + *x = ValidateS3Response{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[21] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ValidateS3Response) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ValidateS3Response) ProtoMessage() {} + +func (x *ValidateS3Response) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[21] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ValidateS3Response.ProtoReflect.Descriptor instead. 
+func (*ValidateS3Response) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{21} +} + +func (x *ValidateS3Response) GetOk() bool { + if x != nil { + return x.Ok + } + return false +} + +func (x *ValidateS3Response) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +type S3Profile struct { + state protoimpl.MessageState `protogen:"open.v1"` + ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + IsActive bool `protobuf:"varint,3,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"` + S3 *S3Config `protobuf:"bytes,4,opt,name=s3,proto3" json:"s3,omitempty"` + SecretAccessKeyConfigured bool `protobuf:"varint,5,opt,name=secret_access_key_configured,json=secretAccessKeyConfigured,proto3" json:"secret_access_key_configured,omitempty"` + CreatedAt string `protobuf:"bytes,6,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + UpdatedAt string `protobuf:"bytes,7,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *S3Profile) Reset() { + *x = S3Profile{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[22] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *S3Profile) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*S3Profile) ProtoMessage() {} + +func (x *S3Profile) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[22] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use S3Profile.ProtoReflect.Descriptor instead. 
+func (*S3Profile) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{22} +} + +func (x *S3Profile) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +func (x *S3Profile) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *S3Profile) GetIsActive() bool { + if x != nil { + return x.IsActive + } + return false +} + +func (x *S3Profile) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +func (x *S3Profile) GetSecretAccessKeyConfigured() bool { + if x != nil { + return x.SecretAccessKeyConfigured + } + return false +} + +func (x *S3Profile) GetCreatedAt() string { + if x != nil { + return x.CreatedAt + } + return "" +} + +func (x *S3Profile) GetUpdatedAt() string { + if x != nil { + return x.UpdatedAt + } + return "" +} + +type ListS3ProfilesRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListS3ProfilesRequest) Reset() { + *x = ListS3ProfilesRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[23] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListS3ProfilesRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListS3ProfilesRequest) ProtoMessage() {} + +func (x *ListS3ProfilesRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[23] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListS3ProfilesRequest.ProtoReflect.Descriptor instead. 
+func (*ListS3ProfilesRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{23} +} + +type ListS3ProfilesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Items []*S3Profile `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ListS3ProfilesResponse) Reset() { + *x = ListS3ProfilesResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[24] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ListS3ProfilesResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ListS3ProfilesResponse) ProtoMessage() {} + +func (x *ListS3ProfilesResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[24] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ListS3ProfilesResponse.ProtoReflect.Descriptor instead. 
+func (*ListS3ProfilesResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{24} +} + +func (x *ListS3ProfilesResponse) GetItems() []*S3Profile { + if x != nil { + return x.Items + } + return nil +} + +type CreateS3ProfileRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - SourceMode string `protobuf:"bytes,1,opt,name=source_mode,json=sourceMode,proto3" json:"source_mode,omitempty"` - BackupRoot string `protobuf:"bytes,2,opt,name=backup_root,json=backupRoot,proto3" json:"backup_root,omitempty"` - SqlitePath string `protobuf:"bytes,3,opt,name=sqlite_path,json=sqlitePath,proto3" json:"sqlite_path,omitempty"` - RetentionDays int32 `protobuf:"varint,4,opt,name=retention_days,json=retentionDays,proto3" json:"retention_days,omitempty"` - KeepLast int32 `protobuf:"varint,5,opt,name=keep_last,json=keepLast,proto3" json:"keep_last,omitempty"` - Postgres *SourceConfig `protobuf:"bytes,6,opt,name=postgres,proto3" json:"postgres,omitempty"` - Redis *SourceConfig `protobuf:"bytes,7,opt,name=redis,proto3" json:"redis,omitempty"` - S3 *S3Config `protobuf:"bytes,8,opt,name=s3,proto3" json:"s3,omitempty"` + ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + S3 *S3Config `protobuf:"bytes,3,opt,name=s3,proto3" json:"s3,omitempty"` + SetActive bool `protobuf:"varint,4,opt,name=set_active,json=setActive,proto3" json:"set_active,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *BackupConfig) Reset() { - *x = BackupConfig{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[4] +func (x *CreateS3ProfileRequest) Reset() { + *x = CreateS3ProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *BackupConfig) String() string { +func (x 
*CreateS3ProfileRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*BackupConfig) ProtoMessage() {} +func (*CreateS3ProfileRequest) ProtoMessage() {} -func (x *BackupConfig) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[4] +func (x *CreateS3ProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[25] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -380,88 +1540,107 @@ func (x *BackupConfig) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use BackupConfig.ProtoReflect.Descriptor instead. -func (*BackupConfig) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{4} +// Deprecated: Use CreateS3ProfileRequest.ProtoReflect.Descriptor instead. +func (*CreateS3ProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{25} } -func (x *BackupConfig) GetSourceMode() string { +func (x *CreateS3ProfileRequest) GetProfileId() string { if x != nil { - return x.SourceMode + return x.ProfileId } return "" } -func (x *BackupConfig) GetBackupRoot() string { +func (x *CreateS3ProfileRequest) GetName() string { if x != nil { - return x.BackupRoot + return x.Name } return "" } -func (x *BackupConfig) GetSqlitePath() string { +func (x *CreateS3ProfileRequest) GetS3() *S3Config { if x != nil { - return x.SqlitePath + return x.S3 } - return "" + return nil } -func (x *BackupConfig) GetRetentionDays() int32 { +func (x *CreateS3ProfileRequest) GetSetActive() bool { if x != nil { - return x.RetentionDays + return x.SetActive } - return 0 + return false } -func (x *BackupConfig) GetKeepLast() int32 { - if x != nil { - return x.KeepLast - } - return 0 +type CreateS3ProfileResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + Profile *S3Profile 
`protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *BackupConfig) GetPostgres() *SourceConfig { - if x != nil { - return x.Postgres - } - return nil +func (x *CreateS3ProfileResponse) Reset() { + *x = CreateS3ProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[26] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } -func (x *BackupConfig) GetRedis() *SourceConfig { +func (x *CreateS3ProfileResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*CreateS3ProfileResponse) ProtoMessage() {} + +func (x *CreateS3ProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[26] if x != nil { - return x.Redis + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return nil + return mi.MessageOf(x) } -func (x *BackupConfig) GetS3() *S3Config { +// Deprecated: Use CreateS3ProfileResponse.ProtoReflect.Descriptor instead. 
+func (*CreateS3ProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{26} +} + +func (x *CreateS3ProfileResponse) GetProfile() *S3Profile { if x != nil { - return x.S3 + return x.Profile } return nil } -type GetConfigRequest struct { +type UpdateS3ProfileRequest struct { state protoimpl.MessageState `protogen:"open.v1"` + ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + S3 *S3Config `protobuf:"bytes,3,opt,name=s3,proto3" json:"s3,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *GetConfigRequest) Reset() { - *x = GetConfigRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[5] +func (x *UpdateS3ProfileRequest) Reset() { + *x = UpdateS3ProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *GetConfigRequest) String() string { +func (x *UpdateS3ProfileRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetConfigRequest) ProtoMessage() {} +func (*UpdateS3ProfileRequest) ProtoMessage() {} -func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[5] +func (x *UpdateS3ProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[27] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -472,33 +1651,54 @@ func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetConfigRequest.ProtoReflect.Descriptor instead. 
-func (*GetConfigRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{5} +// Deprecated: Use UpdateS3ProfileRequest.ProtoReflect.Descriptor instead. +func (*UpdateS3ProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{27} } -type GetConfigResponse struct { +func (x *UpdateS3ProfileRequest) GetProfileId() string { + if x != nil { + return x.ProfileId + } + return "" +} + +func (x *UpdateS3ProfileRequest) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *UpdateS3ProfileRequest) GetS3() *S3Config { + if x != nil { + return x.S3 + } + return nil +} + +type UpdateS3ProfileResponse struct { state protoimpl.MessageState `protogen:"open.v1"` - Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + Profile *S3Profile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *GetConfigResponse) Reset() { - *x = GetConfigResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[6] +func (x *UpdateS3ProfileResponse) Reset() { + *x = UpdateS3ProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *GetConfigResponse) String() string { +func (x *UpdateS3ProfileResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GetConfigResponse) ProtoMessage() {} +func (*UpdateS3ProfileResponse) ProtoMessage() {} -func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[6] +func (x *UpdateS3ProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[28] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -509,40 +1709,40 @@ func (x 
*GetConfigResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GetConfigResponse.ProtoReflect.Descriptor instead. -func (*GetConfigResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{6} +// Deprecated: Use UpdateS3ProfileResponse.ProtoReflect.Descriptor instead. +func (*UpdateS3ProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{28} } -func (x *GetConfigResponse) GetConfig() *BackupConfig { +func (x *UpdateS3ProfileResponse) GetProfile() *S3Profile { if x != nil { - return x.Config + return x.Profile } return nil } -type UpdateConfigRequest struct { +type DeleteS3ProfileRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` + ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *UpdateConfigRequest) Reset() { - *x = UpdateConfigRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[7] +func (x *DeleteS3ProfileRequest) Reset() { + *x = DeleteS3ProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *UpdateConfigRequest) String() string { +func (x *DeleteS3ProfileRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateConfigRequest) ProtoMessage() {} +func (*DeleteS3ProfileRequest) ProtoMessage() {} -func (x *UpdateConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[7] +func (x *DeleteS3ProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[29] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { 
@@ -553,40 +1753,39 @@ func (x *UpdateConfigRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateConfigRequest.ProtoReflect.Descriptor instead. -func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{7} +// Deprecated: Use DeleteS3ProfileRequest.ProtoReflect.Descriptor instead. +func (*DeleteS3ProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{29} } -func (x *UpdateConfigRequest) GetConfig() *BackupConfig { +func (x *DeleteS3ProfileRequest) GetProfileId() string { if x != nil { - return x.Config + return x.ProfileId } - return nil + return "" } -type UpdateConfigResponse struct { +type DeleteS3ProfileResponse struct { state protoimpl.MessageState `protogen:"open.v1"` - Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *UpdateConfigResponse) Reset() { - *x = UpdateConfigResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[8] +func (x *DeleteS3ProfileResponse) Reset() { + *x = DeleteS3ProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *UpdateConfigResponse) String() string { +func (x *DeleteS3ProfileResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*UpdateConfigResponse) ProtoMessage() {} +func (*DeleteS3ProfileResponse) ProtoMessage() {} -func (x *UpdateConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[8] +func (x *DeleteS3ProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[30] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -597,40 +1796,33 @@ func (x 
*UpdateConfigResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use UpdateConfigResponse.ProtoReflect.Descriptor instead. -func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{8} -} - -func (x *UpdateConfigResponse) GetConfig() *BackupConfig { - if x != nil { - return x.Config - } - return nil +// Deprecated: Use DeleteS3ProfileResponse.ProtoReflect.Descriptor instead. +func (*DeleteS3ProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{30} } -type ValidateS3Request struct { +type SetActiveS3ProfileRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - S3 *S3Config `protobuf:"bytes,1,opt,name=s3,proto3" json:"s3,omitempty"` + ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *ValidateS3Request) Reset() { - *x = ValidateS3Request{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[9] +func (x *SetActiveS3ProfileRequest) Reset() { + *x = SetActiveS3ProfileRequest{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *ValidateS3Request) String() string { +func (x *SetActiveS3ProfileRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateS3Request) ProtoMessage() {} +func (*SetActiveS3ProfileRequest) ProtoMessage() {} -func (x *ValidateS3Request) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[9] +func (x *SetActiveS3ProfileRequest) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[31] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -641,41 +1833,40 @@ func (x *ValidateS3Request) ProtoReflect() 
protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateS3Request.ProtoReflect.Descriptor instead. -func (*ValidateS3Request) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{9} +// Deprecated: Use SetActiveS3ProfileRequest.ProtoReflect.Descriptor instead. +func (*SetActiveS3ProfileRequest) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{31} } -func (x *ValidateS3Request) GetS3() *S3Config { +func (x *SetActiveS3ProfileRequest) GetProfileId() string { if x != nil { - return x.S3 + return x.ProfileId } - return nil + return "" } -type ValidateS3Response struct { +type SetActiveS3ProfileResponse struct { state protoimpl.MessageState `protogen:"open.v1"` - Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Profile *S3Profile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *ValidateS3Response) Reset() { - *x = ValidateS3Response{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[10] +func (x *SetActiveS3ProfileResponse) Reset() { + *x = SetActiveS3ProfileResponse{} + mi := &file_proto_backup_v1_backup_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *ValidateS3Response) String() string { +func (x *SetActiveS3ProfileResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ValidateS3Response) ProtoMessage() {} +func (*SetActiveS3ProfileResponse) ProtoMessage() {} -func (x *ValidateS3Response) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[10] +func (x *SetActiveS3ProfileResponse) ProtoReflect() protoreflect.Message { + mi := &file_proto_backup_v1_backup_proto_msgTypes[32] if x != nil { ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -686,38 +1877,34 @@ func (x *ValidateS3Response) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ValidateS3Response.ProtoReflect.Descriptor instead. -func (*ValidateS3Response) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{10} -} - -func (x *ValidateS3Response) GetOk() bool { - if x != nil { - return x.Ok - } - return false +// Deprecated: Use SetActiveS3ProfileResponse.ProtoReflect.Descriptor instead. +func (*SetActiveS3ProfileResponse) Descriptor() ([]byte, []int) { + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{32} } -func (x *ValidateS3Response) GetMessage() string { +func (x *SetActiveS3ProfileResponse) GetProfile() *S3Profile { if x != nil { - return x.Message + return x.Profile } - return "" + return nil } type CreateBackupJobRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - BackupType string `protobuf:"bytes,1,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` - UploadToS3 bool `protobuf:"varint,2,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` - TriggeredBy string `protobuf:"bytes,3,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` - IdempotencyKey string `protobuf:"bytes,4,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + BackupType string `protobuf:"bytes,1,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` + UploadToS3 bool `protobuf:"varint,2,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` + TriggeredBy string `protobuf:"bytes,3,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` + IdempotencyKey string 
`protobuf:"bytes,4,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` + S3ProfileId string `protobuf:"bytes,5,opt,name=s3_profile_id,json=s3ProfileId,proto3" json:"s3_profile_id,omitempty"` + PostgresProfileId string `protobuf:"bytes,6,opt,name=postgres_profile_id,json=postgresProfileId,proto3" json:"postgres_profile_id,omitempty"` + RedisProfileId string `protobuf:"bytes,7,opt,name=redis_profile_id,json=redisProfileId,proto3" json:"redis_profile_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *CreateBackupJobRequest) Reset() { *x = CreateBackupJobRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[11] + mi := &file_proto_backup_v1_backup_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -729,7 +1916,7 @@ func (x *CreateBackupJobRequest) String() string { func (*CreateBackupJobRequest) ProtoMessage() {} func (x *CreateBackupJobRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[11] + mi := &file_proto_backup_v1_backup_proto_msgTypes[33] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -742,7 +1929,7 @@ func (x *CreateBackupJobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupJobRequest.ProtoReflect.Descriptor instead. 
func (*CreateBackupJobRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{11} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{33} } func (x *CreateBackupJobRequest) GetBackupType() string { @@ -773,6 +1960,27 @@ func (x *CreateBackupJobRequest) GetIdempotencyKey() string { return "" } +func (x *CreateBackupJobRequest) GetS3ProfileId() string { + if x != nil { + return x.S3ProfileId + } + return "" +} + +func (x *CreateBackupJobRequest) GetPostgresProfileId() string { + if x != nil { + return x.PostgresProfileId + } + return "" +} + +func (x *CreateBackupJobRequest) GetRedisProfileId() string { + if x != nil { + return x.RedisProfileId + } + return "" +} + type BackupArtifact struct { state protoimpl.MessageState `protogen:"open.v1"` LocalPath string `protobuf:"bytes,1,opt,name=local_path,json=localPath,proto3" json:"local_path,omitempty"` @@ -784,7 +1992,7 @@ type BackupArtifact struct { func (x *BackupArtifact) Reset() { *x = BackupArtifact{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[12] + mi := &file_proto_backup_v1_backup_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -796,7 +2004,7 @@ func (x *BackupArtifact) String() string { func (*BackupArtifact) ProtoMessage() {} func (x *BackupArtifact) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[12] + mi := &file_proto_backup_v1_backup_proto_msgTypes[34] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -809,7 +2017,7 @@ func (x *BackupArtifact) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupArtifact.ProtoReflect.Descriptor instead. 
func (*BackupArtifact) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{12} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{34} } func (x *BackupArtifact) GetLocalPath() string { @@ -844,7 +2052,7 @@ type BackupS3Object struct { func (x *BackupS3Object) Reset() { *x = BackupS3Object{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[13] + mi := &file_proto_backup_v1_backup_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -856,7 +2064,7 @@ func (x *BackupS3Object) String() string { func (*BackupS3Object) ProtoMessage() {} func (x *BackupS3Object) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[13] + mi := &file_proto_backup_v1_backup_proto_msgTypes[35] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -869,7 +2077,7 @@ func (x *BackupS3Object) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupS3Object.ProtoReflect.Descriptor instead. 
func (*BackupS3Object) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{13} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{35} } func (x *BackupS3Object) GetBucket() string { @@ -894,25 +2102,28 @@ func (x *BackupS3Object) GetEtag() string { } type BackupJob struct { - state protoimpl.MessageState `protogen:"open.v1"` - JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` - BackupType string `protobuf:"bytes,2,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - TriggeredBy string `protobuf:"bytes,4,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` - IdempotencyKey string `protobuf:"bytes,5,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` - UploadToS3 bool `protobuf:"varint,6,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` - StartedAt string `protobuf:"bytes,7,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` - FinishedAt string `protobuf:"bytes,8,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"` - ErrorMessage string `protobuf:"bytes,9,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` - Artifact *BackupArtifact `protobuf:"bytes,10,opt,name=artifact,proto3" json:"artifact,omitempty"` - S3Object *BackupS3Object `protobuf:"bytes,11,opt,name=s3_object,json=s3Object,proto3" json:"s3_object,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache + state protoimpl.MessageState `protogen:"open.v1"` + JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` + BackupType string `protobuf:"bytes,2,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` + Status string `protobuf:"bytes,3,opt,name=status,proto3" 
json:"status,omitempty"` + TriggeredBy string `protobuf:"bytes,4,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` + IdempotencyKey string `protobuf:"bytes,5,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` + UploadToS3 bool `protobuf:"varint,6,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` + StartedAt string `protobuf:"bytes,7,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` + FinishedAt string `protobuf:"bytes,8,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"` + ErrorMessage string `protobuf:"bytes,9,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` + Artifact *BackupArtifact `protobuf:"bytes,10,opt,name=artifact,proto3" json:"artifact,omitempty"` + S3Object *BackupS3Object `protobuf:"bytes,11,opt,name=s3_object,json=s3Object,proto3" json:"s3_object,omitempty"` + S3ProfileId string `protobuf:"bytes,12,opt,name=s3_profile_id,json=s3ProfileId,proto3" json:"s3_profile_id,omitempty"` + PostgresProfileId string `protobuf:"bytes,13,opt,name=postgres_profile_id,json=postgresProfileId,proto3" json:"postgres_profile_id,omitempty"` + RedisProfileId string `protobuf:"bytes,14,opt,name=redis_profile_id,json=redisProfileId,proto3" json:"redis_profile_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } func (x *BackupJob) Reset() { *x = BackupJob{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[14] + mi := &file_proto_backup_v1_backup_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -924,7 +2135,7 @@ func (x *BackupJob) String() string { func (*BackupJob) ProtoMessage() {} func (x *BackupJob) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[14] + mi := &file_proto_backup_v1_backup_proto_msgTypes[36] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if 
ms.LoadMessageInfo() == nil { @@ -937,7 +2148,7 @@ func (x *BackupJob) ProtoReflect() protoreflect.Message { // Deprecated: Use BackupJob.ProtoReflect.Descriptor instead. func (*BackupJob) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{14} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{36} } func (x *BackupJob) GetJobId() string { @@ -1017,6 +2228,27 @@ func (x *BackupJob) GetS3Object() *BackupS3Object { return nil } +func (x *BackupJob) GetS3ProfileId() string { + if x != nil { + return x.S3ProfileId + } + return "" +} + +func (x *BackupJob) GetPostgresProfileId() string { + if x != nil { + return x.PostgresProfileId + } + return "" +} + +func (x *BackupJob) GetRedisProfileId() string { + if x != nil { + return x.RedisProfileId + } + return "" +} + type CreateBackupJobResponse struct { state protoimpl.MessageState `protogen:"open.v1"` Job *BackupJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` @@ -1026,7 +2258,7 @@ type CreateBackupJobResponse struct { func (x *CreateBackupJobResponse) Reset() { *x = CreateBackupJobResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[15] + mi := &file_proto_backup_v1_backup_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1038,7 +2270,7 @@ func (x *CreateBackupJobResponse) String() string { func (*CreateBackupJobResponse) ProtoMessage() {} func (x *CreateBackupJobResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[15] + mi := &file_proto_backup_v1_backup_proto_msgTypes[37] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1051,7 +2283,7 @@ func (x *CreateBackupJobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateBackupJobResponse.ProtoReflect.Descriptor instead. 
func (*CreateBackupJobResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{15} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{37} } func (x *CreateBackupJobResponse) GetJob() *BackupJob { @@ -1073,7 +2305,7 @@ type ListBackupJobsRequest struct { func (x *ListBackupJobsRequest) Reset() { *x = ListBackupJobsRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[16] + mi := &file_proto_backup_v1_backup_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1085,7 +2317,7 @@ func (x *ListBackupJobsRequest) String() string { func (*ListBackupJobsRequest) ProtoMessage() {} func (x *ListBackupJobsRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[16] + mi := &file_proto_backup_v1_backup_proto_msgTypes[38] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1098,7 +2330,7 @@ func (x *ListBackupJobsRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use ListBackupJobsRequest.ProtoReflect.Descriptor instead. 
func (*ListBackupJobsRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{16} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{38} } func (x *ListBackupJobsRequest) GetPageSize() int32 { @@ -1139,7 +2371,7 @@ type ListBackupJobsResponse struct { func (x *ListBackupJobsResponse) Reset() { *x = ListBackupJobsResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[17] + mi := &file_proto_backup_v1_backup_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1151,7 +2383,7 @@ func (x *ListBackupJobsResponse) String() string { func (*ListBackupJobsResponse) ProtoMessage() {} func (x *ListBackupJobsResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[17] + mi := &file_proto_backup_v1_backup_proto_msgTypes[39] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1164,7 +2396,7 @@ func (x *ListBackupJobsResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use ListBackupJobsResponse.ProtoReflect.Descriptor instead. 
func (*ListBackupJobsResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{17} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{39} } func (x *ListBackupJobsResponse) GetItems() []*BackupJob { @@ -1190,7 +2422,7 @@ type GetBackupJobRequest struct { func (x *GetBackupJobRequest) Reset() { *x = GetBackupJobRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[18] + mi := &file_proto_backup_v1_backup_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1202,7 +2434,7 @@ func (x *GetBackupJobRequest) String() string { func (*GetBackupJobRequest) ProtoMessage() {} func (x *GetBackupJobRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[18] + mi := &file_proto_backup_v1_backup_proto_msgTypes[40] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1215,7 +2447,7 @@ func (x *GetBackupJobRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBackupJobRequest.ProtoReflect.Descriptor instead. 
func (*GetBackupJobRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{18} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{40} } func (x *GetBackupJobRequest) GetJobId() string { @@ -1234,7 +2466,7 @@ type GetBackupJobResponse struct { func (x *GetBackupJobResponse) Reset() { *x = GetBackupJobResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[19] + mi := &file_proto_backup_v1_backup_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1246,7 +2478,7 @@ func (x *GetBackupJobResponse) String() string { func (*GetBackupJobResponse) ProtoMessage() {} func (x *GetBackupJobResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[19] + mi := &file_proto_backup_v1_backup_proto_msgTypes[41] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1259,7 +2491,7 @@ func (x *GetBackupJobResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetBackupJobResponse.ProtoReflect.Descriptor instead. 
func (*GetBackupJobResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{19} + return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{41} } func (x *GetBackupJobResponse) GetJob() *BackupJob { @@ -1300,7 +2532,7 @@ const file_proto_backup_v1_backup_proto_rawDesc = "" + "\x11secret_access_key\x18\x06 \x01(\tR\x0fsecretAccessKey\x12\x16\n" + "\x06prefix\x18\a \x01(\tR\x06prefix\x12(\n" + "\x10force_path_style\x18\b \x01(\bR\x0eforcePathStyle\x12\x17\n" + - "\ause_ssl\x18\t \x01(\bR\x06useSsl\"\xbe\x02\n" + + "\ause_ssl\x18\t \x01(\bR\x06useSsl\"\xe3\x03\n" + "\fBackupConfig\x12\x1f\n" + "\vsource_mode\x18\x01 \x01(\tR\n" + "sourceMode\x12\x1f\n" + @@ -1312,26 +2544,123 @@ const file_proto_backup_v1_backup_proto_rawDesc = "" + "\tkeep_last\x18\x05 \x01(\x05R\bkeepLast\x123\n" + "\bpostgres\x18\x06 \x01(\v2\x17.backup.v1.SourceConfigR\bpostgres\x12-\n" + "\x05redis\x18\a \x01(\v2\x17.backup.v1.SourceConfigR\x05redis\x12#\n" + - "\x02s3\x18\b \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\"\x12\n" + + "\x02s3\x18\b \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\x12/\n" + + "\x14active_s3_profile_id\x18\t \x01(\tR\x11activeS3ProfileId\x12;\n" + + "\x1aactive_postgres_profile_id\x18\n" + + " \x01(\tR\x17activePostgresProfileId\x125\n" + + "\x17active_redis_profile_id\x18\v \x01(\tR\x14activeRedisProfileId\"\x12\n" + "\x10GetConfigRequest\"D\n" + "\x11GetConfigResponse\x12/\n" + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"F\n" + "\x13UpdateConfigRequest\x12/\n" + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"G\n" + "\x14UpdateConfigResponse\x12/\n" + - "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"8\n" + + "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"\xa0\x02\n" + + "\rSourceProfile\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\x12\x1d\n" + + "\n" + + "profile_id\x18\x02 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x03 
\x01(\tR\x04name\x12\x1b\n" + + "\tis_active\x18\x04 \x01(\bR\bisActive\x12/\n" + + "\x06config\x18\x05 \x01(\v2\x17.backup.v1.SourceConfigR\x06config\x12/\n" + + "\x13password_configured\x18\x06 \x01(\bR\x12passwordConfigured\x12\x1d\n" + + "\n" + + "created_at\x18\a \x01(\tR\tcreatedAt\x12\x1d\n" + + "\n" + + "updated_at\x18\b \x01(\tR\tupdatedAt\"<\n" + + "\x19ListSourceProfilesRequest\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\"L\n" + + "\x1aListSourceProfilesResponse\x12.\n" + + "\x05items\x18\x01 \x03(\v2\x18.backup.v1.SourceProfileR\x05items\"\xc0\x01\n" + + "\x1aCreateSourceProfileRequest\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\x12\x1d\n" + + "\n" + + "profile_id\x18\x02 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12/\n" + + "\x06config\x18\x04 \x01(\v2\x17.backup.v1.SourceConfigR\x06config\x12\x1d\n" + + "\n" + + "set_active\x18\x05 \x01(\bR\tsetActive\"Q\n" + + "\x1bCreateSourceProfileResponse\x122\n" + + "\aprofile\x18\x01 \x01(\v2\x18.backup.v1.SourceProfileR\aprofile\"\xa1\x01\n" + + "\x1aUpdateSourceProfileRequest\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\x12\x1d\n" + + "\n" + + "profile_id\x18\x02 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x03 \x01(\tR\x04name\x12/\n" + + "\x06config\x18\x04 \x01(\v2\x17.backup.v1.SourceConfigR\x06config\"Q\n" + + "\x1bUpdateSourceProfileResponse\x122\n" + + "\aprofile\x18\x01 \x01(\v2\x18.backup.v1.SourceProfileR\aprofile\"\\\n" + + "\x1aDeleteSourceProfileRequest\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\x12\x1d\n" + + "\n" + + "profile_id\x18\x02 \x01(\tR\tprofileId\"\x1d\n" + + "\x1bDeleteSourceProfileResponse\"_\n" + + "\x1dSetActiveSourceProfileRequest\x12\x1f\n" + + "\vsource_type\x18\x01 \x01(\tR\n" + + "sourceType\x12\x1d\n" + + "\n" + + "profile_id\x18\x02 \x01(\tR\tprofileId\"T\n" + + "\x1eSetActiveSourceProfileResponse\x122\n" + + "\aprofile\x18\x01 
\x01(\v2\x18.backup.v1.SourceProfileR\aprofile\"8\n" + "\x11ValidateS3Request\x12#\n" + "\x02s3\x18\x01 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\">\n" + "\x12ValidateS3Response\x12\x0e\n" + "\x02ok\x18\x01 \x01(\bR\x02ok\x12\x18\n" + - "\amessage\x18\x02 \x01(\tR\amessage\"\xa7\x01\n" + + "\amessage\x18\x02 \x01(\tR\amessage\"\xff\x01\n" + + "\tS3Profile\x12\x1d\n" + + "\n" + + "profile_id\x18\x01 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12\x1b\n" + + "\tis_active\x18\x03 \x01(\bR\bisActive\x12#\n" + + "\x02s3\x18\x04 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\x12?\n" + + "\x1csecret_access_key_configured\x18\x05 \x01(\bR\x19secretAccessKeyConfigured\x12\x1d\n" + + "\n" + + "created_at\x18\x06 \x01(\tR\tcreatedAt\x12\x1d\n" + + "\n" + + "updated_at\x18\a \x01(\tR\tupdatedAt\"\x17\n" + + "\x15ListS3ProfilesRequest\"D\n" + + "\x16ListS3ProfilesResponse\x12*\n" + + "\x05items\x18\x01 \x03(\v2\x14.backup.v1.S3ProfileR\x05items\"\x8f\x01\n" + + "\x16CreateS3ProfileRequest\x12\x1d\n" + + "\n" + + "profile_id\x18\x01 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12#\n" + + "\x02s3\x18\x03 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\x12\x1d\n" + + "\n" + + "set_active\x18\x04 \x01(\bR\tsetActive\"I\n" + + "\x17CreateS3ProfileResponse\x12.\n" + + "\aprofile\x18\x01 \x01(\v2\x14.backup.v1.S3ProfileR\aprofile\"p\n" + + "\x16UpdateS3ProfileRequest\x12\x1d\n" + + "\n" + + "profile_id\x18\x01 \x01(\tR\tprofileId\x12\x12\n" + + "\x04name\x18\x02 \x01(\tR\x04name\x12#\n" + + "\x02s3\x18\x03 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\"I\n" + + "\x17UpdateS3ProfileResponse\x12.\n" + + "\aprofile\x18\x01 \x01(\v2\x14.backup.v1.S3ProfileR\aprofile\"7\n" + + "\x16DeleteS3ProfileRequest\x12\x1d\n" + + "\n" + + "profile_id\x18\x01 \x01(\tR\tprofileId\"\x19\n" + + "\x17DeleteS3ProfileResponse\":\n" + + "\x19SetActiveS3ProfileRequest\x12\x1d\n" + + "\n" + + "profile_id\x18\x01 \x01(\tR\tprofileId\"L\n" + + "\x1aSetActiveS3ProfileResponse\x12.\n" + + 
"\aprofile\x18\x01 \x01(\v2\x14.backup.v1.S3ProfileR\aprofile\"\xa5\x02\n" + "\x16CreateBackupJobRequest\x12\x1f\n" + "\vbackup_type\x18\x01 \x01(\tR\n" + "backupType\x12 \n" + "\fupload_to_s3\x18\x02 \x01(\bR\n" + "uploadToS3\x12!\n" + "\ftriggered_by\x18\x03 \x01(\tR\vtriggeredBy\x12'\n" + - "\x0fidempotency_key\x18\x04 \x01(\tR\x0eidempotencyKey\"f\n" + + "\x0fidempotency_key\x18\x04 \x01(\tR\x0eidempotencyKey\x12\"\n" + + "\rs3_profile_id\x18\x05 \x01(\tR\vs3ProfileId\x12.\n" + + "\x13postgres_profile_id\x18\x06 \x01(\tR\x11postgresProfileId\x12(\n" + + "\x10redis_profile_id\x18\a \x01(\tR\x0eredisProfileId\"f\n" + "\x0eBackupArtifact\x12\x1d\n" + "\n" + "local_path\x18\x01 \x01(\tR\tlocalPath\x12\x1d\n" + @@ -1341,7 +2670,7 @@ const file_proto_backup_v1_backup_proto_rawDesc = "" + "\x0eBackupS3Object\x12\x16\n" + "\x06bucket\x18\x01 \x01(\tR\x06bucket\x12\x10\n" + "\x03key\x18\x02 \x01(\tR\x03key\x12\x12\n" + - "\x04etag\x18\x03 \x01(\tR\x04etag\"\x9d\x03\n" + + "\x04etag\x18\x03 \x01(\tR\x04etag\"\x9b\x04\n" + "\tBackupJob\x12\x15\n" + "\x06job_id\x18\x01 \x01(\tR\x05jobId\x12\x1f\n" + "\vbackup_type\x18\x02 \x01(\tR\n" + @@ -1358,7 +2687,10 @@ const file_proto_backup_v1_backup_proto_rawDesc = "" + "\rerror_message\x18\t \x01(\tR\ferrorMessage\x125\n" + "\bartifact\x18\n" + " \x01(\v2\x19.backup.v1.BackupArtifactR\bartifact\x126\n" + - "\ts3_object\x18\v \x01(\v2\x19.backup.v1.BackupS3ObjectR\bs3Object\"A\n" + + "\ts3_object\x18\v \x01(\v2\x19.backup.v1.BackupS3ObjectR\bs3Object\x12\"\n" + + "\rs3_profile_id\x18\f \x01(\tR\vs3ProfileId\x12.\n" + + "\x13postgres_profile_id\x18\r \x01(\tR\x11postgresProfileId\x12(\n" + + "\x10redis_profile_id\x18\x0e \x01(\tR\x0eredisProfileId\"A\n" + "\x17CreateBackupJobResponse\x12&\n" + "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job\"\x8c\x01\n" + "\x15ListBackupJobsRequest\x12\x1b\n" + @@ -1374,13 +2706,23 @@ const file_proto_backup_v1_backup_proto_rawDesc = "" + "\x13GetBackupJobRequest\x12\x15\n" + 
"\x06job_id\x18\x01 \x01(\tR\x05jobId\">\n" + "\x14GetBackupJobResponse\x12&\n" + - "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job2\xb4\x04\n" + + "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job2\x80\f\n" + "\rBackupService\x12=\n" + "\x06Health\x12\x18.backup.v1.HealthRequest\x1a\x19.backup.v1.HealthResponse\x12F\n" + "\tGetConfig\x12\x1b.backup.v1.GetConfigRequest\x1a\x1c.backup.v1.GetConfigResponse\x12O\n" + - "\fUpdateConfig\x12\x1e.backup.v1.UpdateConfigRequest\x1a\x1f.backup.v1.UpdateConfigResponse\x12I\n" + + "\fUpdateConfig\x12\x1e.backup.v1.UpdateConfigRequest\x1a\x1f.backup.v1.UpdateConfigResponse\x12a\n" + + "\x12ListSourceProfiles\x12$.backup.v1.ListSourceProfilesRequest\x1a%.backup.v1.ListSourceProfilesResponse\x12d\n" + + "\x13CreateSourceProfile\x12%.backup.v1.CreateSourceProfileRequest\x1a&.backup.v1.CreateSourceProfileResponse\x12d\n" + + "\x13UpdateSourceProfile\x12%.backup.v1.UpdateSourceProfileRequest\x1a&.backup.v1.UpdateSourceProfileResponse\x12d\n" + + "\x13DeleteSourceProfile\x12%.backup.v1.DeleteSourceProfileRequest\x1a&.backup.v1.DeleteSourceProfileResponse\x12m\n" + + "\x16SetActiveSourceProfile\x12(.backup.v1.SetActiveSourceProfileRequest\x1a).backup.v1.SetActiveSourceProfileResponse\x12I\n" + "\n" + - "ValidateS3\x12\x1c.backup.v1.ValidateS3Request\x1a\x1d.backup.v1.ValidateS3Response\x12X\n" + + "ValidateS3\x12\x1c.backup.v1.ValidateS3Request\x1a\x1d.backup.v1.ValidateS3Response\x12U\n" + + "\x0eListS3Profiles\x12 .backup.v1.ListS3ProfilesRequest\x1a!.backup.v1.ListS3ProfilesResponse\x12X\n" + + "\x0fCreateS3Profile\x12!.backup.v1.CreateS3ProfileRequest\x1a\".backup.v1.CreateS3ProfileResponse\x12X\n" + + "\x0fUpdateS3Profile\x12!.backup.v1.UpdateS3ProfileRequest\x1a\".backup.v1.UpdateS3ProfileResponse\x12X\n" + + "\x0fDeleteS3Profile\x12!.backup.v1.DeleteS3ProfileRequest\x1a\".backup.v1.DeleteS3ProfileResponse\x12a\n" + + 
"\x12SetActiveS3Profile\x12$.backup.v1.SetActiveS3ProfileRequest\x1a%.backup.v1.SetActiveS3ProfileResponse\x12X\n" + "\x0fCreateBackupJob\x12!.backup.v1.CreateBackupJobRequest\x1a\".backup.v1.CreateBackupJobResponse\x12U\n" + "\x0eListBackupJobs\x12 .backup.v1.ListBackupJobsRequest\x1a!.backup.v1.ListBackupJobsResponse\x12O\n" + "\fGetBackupJob\x12\x1e.backup.v1.GetBackupJobRequest\x1a\x1f.backup.v1.GetBackupJobResponseB=Z;github.com/Wei-Shaw/sub2api/backup/proto/backup/v1;backupv1b\x06proto3" @@ -1397,28 +2739,50 @@ func file_proto_backup_v1_backup_proto_rawDescGZIP() []byte { return file_proto_backup_v1_backup_proto_rawDescData } -var file_proto_backup_v1_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 20) +var file_proto_backup_v1_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 42) var file_proto_backup_v1_backup_proto_goTypes = []any{ - (*HealthRequest)(nil), // 0: backup.v1.HealthRequest - (*HealthResponse)(nil), // 1: backup.v1.HealthResponse - (*SourceConfig)(nil), // 2: backup.v1.SourceConfig - (*S3Config)(nil), // 3: backup.v1.S3Config - (*BackupConfig)(nil), // 4: backup.v1.BackupConfig - (*GetConfigRequest)(nil), // 5: backup.v1.GetConfigRequest - (*GetConfigResponse)(nil), // 6: backup.v1.GetConfigResponse - (*UpdateConfigRequest)(nil), // 7: backup.v1.UpdateConfigRequest - (*UpdateConfigResponse)(nil), // 8: backup.v1.UpdateConfigResponse - (*ValidateS3Request)(nil), // 9: backup.v1.ValidateS3Request - (*ValidateS3Response)(nil), // 10: backup.v1.ValidateS3Response - (*CreateBackupJobRequest)(nil), // 11: backup.v1.CreateBackupJobRequest - (*BackupArtifact)(nil), // 12: backup.v1.BackupArtifact - (*BackupS3Object)(nil), // 13: backup.v1.BackupS3Object - (*BackupJob)(nil), // 14: backup.v1.BackupJob - (*CreateBackupJobResponse)(nil), // 15: backup.v1.CreateBackupJobResponse - (*ListBackupJobsRequest)(nil), // 16: backup.v1.ListBackupJobsRequest - (*ListBackupJobsResponse)(nil), // 17: backup.v1.ListBackupJobsResponse - 
(*GetBackupJobRequest)(nil), // 18: backup.v1.GetBackupJobRequest - (*GetBackupJobResponse)(nil), // 19: backup.v1.GetBackupJobResponse + (*HealthRequest)(nil), // 0: backup.v1.HealthRequest + (*HealthResponse)(nil), // 1: backup.v1.HealthResponse + (*SourceConfig)(nil), // 2: backup.v1.SourceConfig + (*S3Config)(nil), // 3: backup.v1.S3Config + (*BackupConfig)(nil), // 4: backup.v1.BackupConfig + (*GetConfigRequest)(nil), // 5: backup.v1.GetConfigRequest + (*GetConfigResponse)(nil), // 6: backup.v1.GetConfigResponse + (*UpdateConfigRequest)(nil), // 7: backup.v1.UpdateConfigRequest + (*UpdateConfigResponse)(nil), // 8: backup.v1.UpdateConfigResponse + (*SourceProfile)(nil), // 9: backup.v1.SourceProfile + (*ListSourceProfilesRequest)(nil), // 10: backup.v1.ListSourceProfilesRequest + (*ListSourceProfilesResponse)(nil), // 11: backup.v1.ListSourceProfilesResponse + (*CreateSourceProfileRequest)(nil), // 12: backup.v1.CreateSourceProfileRequest + (*CreateSourceProfileResponse)(nil), // 13: backup.v1.CreateSourceProfileResponse + (*UpdateSourceProfileRequest)(nil), // 14: backup.v1.UpdateSourceProfileRequest + (*UpdateSourceProfileResponse)(nil), // 15: backup.v1.UpdateSourceProfileResponse + (*DeleteSourceProfileRequest)(nil), // 16: backup.v1.DeleteSourceProfileRequest + (*DeleteSourceProfileResponse)(nil), // 17: backup.v1.DeleteSourceProfileResponse + (*SetActiveSourceProfileRequest)(nil), // 18: backup.v1.SetActiveSourceProfileRequest + (*SetActiveSourceProfileResponse)(nil), // 19: backup.v1.SetActiveSourceProfileResponse + (*ValidateS3Request)(nil), // 20: backup.v1.ValidateS3Request + (*ValidateS3Response)(nil), // 21: backup.v1.ValidateS3Response + (*S3Profile)(nil), // 22: backup.v1.S3Profile + (*ListS3ProfilesRequest)(nil), // 23: backup.v1.ListS3ProfilesRequest + (*ListS3ProfilesResponse)(nil), // 24: backup.v1.ListS3ProfilesResponse + (*CreateS3ProfileRequest)(nil), // 25: backup.v1.CreateS3ProfileRequest + (*CreateS3ProfileResponse)(nil), // 26: 
backup.v1.CreateS3ProfileResponse + (*UpdateS3ProfileRequest)(nil), // 27: backup.v1.UpdateS3ProfileRequest + (*UpdateS3ProfileResponse)(nil), // 28: backup.v1.UpdateS3ProfileResponse + (*DeleteS3ProfileRequest)(nil), // 29: backup.v1.DeleteS3ProfileRequest + (*DeleteS3ProfileResponse)(nil), // 30: backup.v1.DeleteS3ProfileResponse + (*SetActiveS3ProfileRequest)(nil), // 31: backup.v1.SetActiveS3ProfileRequest + (*SetActiveS3ProfileResponse)(nil), // 32: backup.v1.SetActiveS3ProfileResponse + (*CreateBackupJobRequest)(nil), // 33: backup.v1.CreateBackupJobRequest + (*BackupArtifact)(nil), // 34: backup.v1.BackupArtifact + (*BackupS3Object)(nil), // 35: backup.v1.BackupS3Object + (*BackupJob)(nil), // 36: backup.v1.BackupJob + (*CreateBackupJobResponse)(nil), // 37: backup.v1.CreateBackupJobResponse + (*ListBackupJobsRequest)(nil), // 38: backup.v1.ListBackupJobsRequest + (*ListBackupJobsResponse)(nil), // 39: backup.v1.ListBackupJobsResponse + (*GetBackupJobRequest)(nil), // 40: backup.v1.GetBackupJobRequest + (*GetBackupJobResponse)(nil), // 41: backup.v1.GetBackupJobResponse } var file_proto_backup_v1_backup_proto_depIdxs = []int32{ 2, // 0: backup.v1.BackupConfig.postgres:type_name -> backup.v1.SourceConfig @@ -1427,31 +2791,65 @@ var file_proto_backup_v1_backup_proto_depIdxs = []int32{ 4, // 3: backup.v1.GetConfigResponse.config:type_name -> backup.v1.BackupConfig 4, // 4: backup.v1.UpdateConfigRequest.config:type_name -> backup.v1.BackupConfig 4, // 5: backup.v1.UpdateConfigResponse.config:type_name -> backup.v1.BackupConfig - 3, // 6: backup.v1.ValidateS3Request.s3:type_name -> backup.v1.S3Config - 12, // 7: backup.v1.BackupJob.artifact:type_name -> backup.v1.BackupArtifact - 13, // 8: backup.v1.BackupJob.s3_object:type_name -> backup.v1.BackupS3Object - 14, // 9: backup.v1.CreateBackupJobResponse.job:type_name -> backup.v1.BackupJob - 14, // 10: backup.v1.ListBackupJobsResponse.items:type_name -> backup.v1.BackupJob - 14, // 11: 
backup.v1.GetBackupJobResponse.job:type_name -> backup.v1.BackupJob - 0, // 12: backup.v1.BackupService.Health:input_type -> backup.v1.HealthRequest - 5, // 13: backup.v1.BackupService.GetConfig:input_type -> backup.v1.GetConfigRequest - 7, // 14: backup.v1.BackupService.UpdateConfig:input_type -> backup.v1.UpdateConfigRequest - 9, // 15: backup.v1.BackupService.ValidateS3:input_type -> backup.v1.ValidateS3Request - 11, // 16: backup.v1.BackupService.CreateBackupJob:input_type -> backup.v1.CreateBackupJobRequest - 16, // 17: backup.v1.BackupService.ListBackupJobs:input_type -> backup.v1.ListBackupJobsRequest - 18, // 18: backup.v1.BackupService.GetBackupJob:input_type -> backup.v1.GetBackupJobRequest - 1, // 19: backup.v1.BackupService.Health:output_type -> backup.v1.HealthResponse - 6, // 20: backup.v1.BackupService.GetConfig:output_type -> backup.v1.GetConfigResponse - 8, // 21: backup.v1.BackupService.UpdateConfig:output_type -> backup.v1.UpdateConfigResponse - 10, // 22: backup.v1.BackupService.ValidateS3:output_type -> backup.v1.ValidateS3Response - 15, // 23: backup.v1.BackupService.CreateBackupJob:output_type -> backup.v1.CreateBackupJobResponse - 17, // 24: backup.v1.BackupService.ListBackupJobs:output_type -> backup.v1.ListBackupJobsResponse - 19, // 25: backup.v1.BackupService.GetBackupJob:output_type -> backup.v1.GetBackupJobResponse - 19, // [19:26] is the sub-list for method output_type - 12, // [12:19] is the sub-list for method input_type - 12, // [12:12] is the sub-list for extension type_name - 12, // [12:12] is the sub-list for extension extendee - 0, // [0:12] is the sub-list for field type_name + 2, // 6: backup.v1.SourceProfile.config:type_name -> backup.v1.SourceConfig + 9, // 7: backup.v1.ListSourceProfilesResponse.items:type_name -> backup.v1.SourceProfile + 2, // 8: backup.v1.CreateSourceProfileRequest.config:type_name -> backup.v1.SourceConfig + 9, // 9: backup.v1.CreateSourceProfileResponse.profile:type_name -> backup.v1.SourceProfile + 
2, // 10: backup.v1.UpdateSourceProfileRequest.config:type_name -> backup.v1.SourceConfig + 9, // 11: backup.v1.UpdateSourceProfileResponse.profile:type_name -> backup.v1.SourceProfile + 9, // 12: backup.v1.SetActiveSourceProfileResponse.profile:type_name -> backup.v1.SourceProfile + 3, // 13: backup.v1.ValidateS3Request.s3:type_name -> backup.v1.S3Config + 3, // 14: backup.v1.S3Profile.s3:type_name -> backup.v1.S3Config + 22, // 15: backup.v1.ListS3ProfilesResponse.items:type_name -> backup.v1.S3Profile + 3, // 16: backup.v1.CreateS3ProfileRequest.s3:type_name -> backup.v1.S3Config + 22, // 17: backup.v1.CreateS3ProfileResponse.profile:type_name -> backup.v1.S3Profile + 3, // 18: backup.v1.UpdateS3ProfileRequest.s3:type_name -> backup.v1.S3Config + 22, // 19: backup.v1.UpdateS3ProfileResponse.profile:type_name -> backup.v1.S3Profile + 22, // 20: backup.v1.SetActiveS3ProfileResponse.profile:type_name -> backup.v1.S3Profile + 34, // 21: backup.v1.BackupJob.artifact:type_name -> backup.v1.BackupArtifact + 35, // 22: backup.v1.BackupJob.s3_object:type_name -> backup.v1.BackupS3Object + 36, // 23: backup.v1.CreateBackupJobResponse.job:type_name -> backup.v1.BackupJob + 36, // 24: backup.v1.ListBackupJobsResponse.items:type_name -> backup.v1.BackupJob + 36, // 25: backup.v1.GetBackupJobResponse.job:type_name -> backup.v1.BackupJob + 0, // 26: backup.v1.BackupService.Health:input_type -> backup.v1.HealthRequest + 5, // 27: backup.v1.BackupService.GetConfig:input_type -> backup.v1.GetConfigRequest + 7, // 28: backup.v1.BackupService.UpdateConfig:input_type -> backup.v1.UpdateConfigRequest + 10, // 29: backup.v1.BackupService.ListSourceProfiles:input_type -> backup.v1.ListSourceProfilesRequest + 12, // 30: backup.v1.BackupService.CreateSourceProfile:input_type -> backup.v1.CreateSourceProfileRequest + 14, // 31: backup.v1.BackupService.UpdateSourceProfile:input_type -> backup.v1.UpdateSourceProfileRequest + 16, // 32: backup.v1.BackupService.DeleteSourceProfile:input_type 
-> backup.v1.DeleteSourceProfileRequest + 18, // 33: backup.v1.BackupService.SetActiveSourceProfile:input_type -> backup.v1.SetActiveSourceProfileRequest + 20, // 34: backup.v1.BackupService.ValidateS3:input_type -> backup.v1.ValidateS3Request + 23, // 35: backup.v1.BackupService.ListS3Profiles:input_type -> backup.v1.ListS3ProfilesRequest + 25, // 36: backup.v1.BackupService.CreateS3Profile:input_type -> backup.v1.CreateS3ProfileRequest + 27, // 37: backup.v1.BackupService.UpdateS3Profile:input_type -> backup.v1.UpdateS3ProfileRequest + 29, // 38: backup.v1.BackupService.DeleteS3Profile:input_type -> backup.v1.DeleteS3ProfileRequest + 31, // 39: backup.v1.BackupService.SetActiveS3Profile:input_type -> backup.v1.SetActiveS3ProfileRequest + 33, // 40: backup.v1.BackupService.CreateBackupJob:input_type -> backup.v1.CreateBackupJobRequest + 38, // 41: backup.v1.BackupService.ListBackupJobs:input_type -> backup.v1.ListBackupJobsRequest + 40, // 42: backup.v1.BackupService.GetBackupJob:input_type -> backup.v1.GetBackupJobRequest + 1, // 43: backup.v1.BackupService.Health:output_type -> backup.v1.HealthResponse + 6, // 44: backup.v1.BackupService.GetConfig:output_type -> backup.v1.GetConfigResponse + 8, // 45: backup.v1.BackupService.UpdateConfig:output_type -> backup.v1.UpdateConfigResponse + 11, // 46: backup.v1.BackupService.ListSourceProfiles:output_type -> backup.v1.ListSourceProfilesResponse + 13, // 47: backup.v1.BackupService.CreateSourceProfile:output_type -> backup.v1.CreateSourceProfileResponse + 15, // 48: backup.v1.BackupService.UpdateSourceProfile:output_type -> backup.v1.UpdateSourceProfileResponse + 17, // 49: backup.v1.BackupService.DeleteSourceProfile:output_type -> backup.v1.DeleteSourceProfileResponse + 19, // 50: backup.v1.BackupService.SetActiveSourceProfile:output_type -> backup.v1.SetActiveSourceProfileResponse + 21, // 51: backup.v1.BackupService.ValidateS3:output_type -> backup.v1.ValidateS3Response + 24, // 52: 
backup.v1.BackupService.ListS3Profiles:output_type -> backup.v1.ListS3ProfilesResponse + 26, // 53: backup.v1.BackupService.CreateS3Profile:output_type -> backup.v1.CreateS3ProfileResponse + 28, // 54: backup.v1.BackupService.UpdateS3Profile:output_type -> backup.v1.UpdateS3ProfileResponse + 30, // 55: backup.v1.BackupService.DeleteS3Profile:output_type -> backup.v1.DeleteS3ProfileResponse + 32, // 56: backup.v1.BackupService.SetActiveS3Profile:output_type -> backup.v1.SetActiveS3ProfileResponse + 37, // 57: backup.v1.BackupService.CreateBackupJob:output_type -> backup.v1.CreateBackupJobResponse + 39, // 58: backup.v1.BackupService.ListBackupJobs:output_type -> backup.v1.ListBackupJobsResponse + 41, // 59: backup.v1.BackupService.GetBackupJob:output_type -> backup.v1.GetBackupJobResponse + 43, // [43:60] is the sub-list for method output_type + 26, // [26:43] is the sub-list for method input_type + 26, // [26:26] is the sub-list for extension type_name + 26, // [26:26] is the sub-list for extension extendee + 0, // [0:26] is the sub-list for field type_name } func init() { file_proto_backup_v1_backup_proto_init() } @@ -1465,7 +2863,7 @@ func file_proto_backup_v1_backup_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_backup_v1_backup_proto_rawDesc), len(file_proto_backup_v1_backup_proto_rawDesc)), NumEnums: 0, - NumMessages: 20, + NumMessages: 42, NumExtensions: 0, NumServices: 1, }, diff --git a/backup/proto/backup/v1/backup.proto b/backup/proto/backup/v1/backup.proto index b1fe78627..8faa51d14 100644 --- a/backup/proto/backup/v1/backup.proto +++ b/backup/proto/backup/v1/backup.proto @@ -8,7 +8,17 @@ service BackupService { rpc Health(HealthRequest) returns (HealthResponse); rpc GetConfig(GetConfigRequest) returns (GetConfigResponse); rpc UpdateConfig(UpdateConfigRequest) returns (UpdateConfigResponse); + rpc ListSourceProfiles(ListSourceProfilesRequest) returns (ListSourceProfilesResponse); + 
rpc CreateSourceProfile(CreateSourceProfileRequest) returns (CreateSourceProfileResponse); + rpc UpdateSourceProfile(UpdateSourceProfileRequest) returns (UpdateSourceProfileResponse); + rpc DeleteSourceProfile(DeleteSourceProfileRequest) returns (DeleteSourceProfileResponse); + rpc SetActiveSourceProfile(SetActiveSourceProfileRequest) returns (SetActiveSourceProfileResponse); rpc ValidateS3(ValidateS3Request) returns (ValidateS3Response); + rpc ListS3Profiles(ListS3ProfilesRequest) returns (ListS3ProfilesResponse); + rpc CreateS3Profile(CreateS3ProfileRequest) returns (CreateS3ProfileResponse); + rpc UpdateS3Profile(UpdateS3ProfileRequest) returns (UpdateS3ProfileResponse); + rpc DeleteS3Profile(DeleteS3ProfileRequest) returns (DeleteS3ProfileResponse); + rpc SetActiveS3Profile(SetActiveS3ProfileRequest) returns (SetActiveS3ProfileResponse); rpc CreateBackupJob(CreateBackupJobRequest) returns (CreateBackupJobResponse); rpc ListBackupJobs(ListBackupJobsRequest) returns (ListBackupJobsResponse); rpc GetBackupJob(GetBackupJobRequest) returns (GetBackupJobResponse); @@ -56,6 +66,9 @@ message BackupConfig { SourceConfig postgres = 6; SourceConfig redis = 7; S3Config s3 = 8; + string active_s3_profile_id = 9; + string active_postgres_profile_id = 10; + string active_redis_profile_id = 11; } message GetConfigRequest {} @@ -72,6 +85,64 @@ message UpdateConfigResponse { BackupConfig config = 1; } +message SourceProfile { + string source_type = 1; + string profile_id = 2; + string name = 3; + bool is_active = 4; + SourceConfig config = 5; + bool password_configured = 6; + string created_at = 7; + string updated_at = 8; +} + +message ListSourceProfilesRequest { + string source_type = 1; +} + +message ListSourceProfilesResponse { + repeated SourceProfile items = 1; +} + +message CreateSourceProfileRequest { + string source_type = 1; + string profile_id = 2; + string name = 3; + SourceConfig config = 4; + bool set_active = 5; +} + +message CreateSourceProfileResponse { + 
SourceProfile profile = 1; +} + +message UpdateSourceProfileRequest { + string source_type = 1; + string profile_id = 2; + string name = 3; + SourceConfig config = 4; +} + +message UpdateSourceProfileResponse { + SourceProfile profile = 1; +} + +message DeleteSourceProfileRequest { + string source_type = 1; + string profile_id = 2; +} + +message DeleteSourceProfileResponse {} + +message SetActiveSourceProfileRequest { + string source_type = 1; + string profile_id = 2; +} + +message SetActiveSourceProfileResponse { + SourceProfile profile = 1; +} + message ValidateS3Request { S3Config s3 = 1; } @@ -81,11 +152,65 @@ message ValidateS3Response { string message = 2; } +message S3Profile { + string profile_id = 1; + string name = 2; + bool is_active = 3; + S3Config s3 = 4; + bool secret_access_key_configured = 5; + string created_at = 6; + string updated_at = 7; +} + +message ListS3ProfilesRequest {} + +message ListS3ProfilesResponse { + repeated S3Profile items = 1; +} + +message CreateS3ProfileRequest { + string profile_id = 1; + string name = 2; + S3Config s3 = 3; + bool set_active = 4; +} + +message CreateS3ProfileResponse { + S3Profile profile = 1; +} + +message UpdateS3ProfileRequest { + string profile_id = 1; + string name = 2; + S3Config s3 = 3; +} + +message UpdateS3ProfileResponse { + S3Profile profile = 1; +} + +message DeleteS3ProfileRequest { + string profile_id = 1; +} + +message DeleteS3ProfileResponse {} + +message SetActiveS3ProfileRequest { + string profile_id = 1; +} + +message SetActiveS3ProfileResponse { + S3Profile profile = 1; +} + message CreateBackupJobRequest { string backup_type = 1; bool upload_to_s3 = 2; string triggered_by = 3; string idempotency_key = 4; + string s3_profile_id = 5; + string postgres_profile_id = 6; + string redis_profile_id = 7; } message BackupArtifact { @@ -112,6 +237,9 @@ message BackupJob { string error_message = 9; BackupArtifact artifact = 10; BackupS3Object s3_object = 11; + string s3_profile_id = 12; + string 
postgres_profile_id = 13; + string redis_profile_id = 14; } message CreateBackupJobResponse { diff --git a/backup/proto/backup/v1/backup_grpc.pb.go b/backup/proto/backup/v1/backup_grpc.pb.go index c9d7383fe..c4731e1a1 100644 --- a/backup/proto/backup/v1/backup_grpc.pb.go +++ b/backup/proto/backup/v1/backup_grpc.pb.go @@ -19,13 +19,23 @@ import ( const _ = grpc.SupportPackageIsVersion9 const ( - BackupService_Health_FullMethodName = "/backup.v1.BackupService/Health" - BackupService_GetConfig_FullMethodName = "/backup.v1.BackupService/GetConfig" - BackupService_UpdateConfig_FullMethodName = "/backup.v1.BackupService/UpdateConfig" - BackupService_ValidateS3_FullMethodName = "/backup.v1.BackupService/ValidateS3" - BackupService_CreateBackupJob_FullMethodName = "/backup.v1.BackupService/CreateBackupJob" - BackupService_ListBackupJobs_FullMethodName = "/backup.v1.BackupService/ListBackupJobs" - BackupService_GetBackupJob_FullMethodName = "/backup.v1.BackupService/GetBackupJob" + BackupService_Health_FullMethodName = "/backup.v1.BackupService/Health" + BackupService_GetConfig_FullMethodName = "/backup.v1.BackupService/GetConfig" + BackupService_UpdateConfig_FullMethodName = "/backup.v1.BackupService/UpdateConfig" + BackupService_ListSourceProfiles_FullMethodName = "/backup.v1.BackupService/ListSourceProfiles" + BackupService_CreateSourceProfile_FullMethodName = "/backup.v1.BackupService/CreateSourceProfile" + BackupService_UpdateSourceProfile_FullMethodName = "/backup.v1.BackupService/UpdateSourceProfile" + BackupService_DeleteSourceProfile_FullMethodName = "/backup.v1.BackupService/DeleteSourceProfile" + BackupService_SetActiveSourceProfile_FullMethodName = "/backup.v1.BackupService/SetActiveSourceProfile" + BackupService_ValidateS3_FullMethodName = "/backup.v1.BackupService/ValidateS3" + BackupService_ListS3Profiles_FullMethodName = "/backup.v1.BackupService/ListS3Profiles" + BackupService_CreateS3Profile_FullMethodName = "/backup.v1.BackupService/CreateS3Profile" + 
BackupService_UpdateS3Profile_FullMethodName = "/backup.v1.BackupService/UpdateS3Profile" + BackupService_DeleteS3Profile_FullMethodName = "/backup.v1.BackupService/DeleteS3Profile" + BackupService_SetActiveS3Profile_FullMethodName = "/backup.v1.BackupService/SetActiveS3Profile" + BackupService_CreateBackupJob_FullMethodName = "/backup.v1.BackupService/CreateBackupJob" + BackupService_ListBackupJobs_FullMethodName = "/backup.v1.BackupService/ListBackupJobs" + BackupService_GetBackupJob_FullMethodName = "/backup.v1.BackupService/GetBackupJob" ) // BackupServiceClient is the client API for BackupService service. @@ -35,7 +45,17 @@ type BackupServiceClient interface { Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) + ListSourceProfiles(ctx context.Context, in *ListSourceProfilesRequest, opts ...grpc.CallOption) (*ListSourceProfilesResponse, error) + CreateSourceProfile(ctx context.Context, in *CreateSourceProfileRequest, opts ...grpc.CallOption) (*CreateSourceProfileResponse, error) + UpdateSourceProfile(ctx context.Context, in *UpdateSourceProfileRequest, opts ...grpc.CallOption) (*UpdateSourceProfileResponse, error) + DeleteSourceProfile(ctx context.Context, in *DeleteSourceProfileRequest, opts ...grpc.CallOption) (*DeleteSourceProfileResponse, error) + SetActiveSourceProfile(ctx context.Context, in *SetActiveSourceProfileRequest, opts ...grpc.CallOption) (*SetActiveSourceProfileResponse, error) ValidateS3(ctx context.Context, in *ValidateS3Request, opts ...grpc.CallOption) (*ValidateS3Response, error) + ListS3Profiles(ctx context.Context, in *ListS3ProfilesRequest, opts ...grpc.CallOption) (*ListS3ProfilesResponse, error) + CreateS3Profile(ctx context.Context, in *CreateS3ProfileRequest, opts 
...grpc.CallOption) (*CreateS3ProfileResponse, error) + UpdateS3Profile(ctx context.Context, in *UpdateS3ProfileRequest, opts ...grpc.CallOption) (*UpdateS3ProfileResponse, error) + DeleteS3Profile(ctx context.Context, in *DeleteS3ProfileRequest, opts ...grpc.CallOption) (*DeleteS3ProfileResponse, error) + SetActiveS3Profile(ctx context.Context, in *SetActiveS3ProfileRequest, opts ...grpc.CallOption) (*SetActiveS3ProfileResponse, error) CreateBackupJob(ctx context.Context, in *CreateBackupJobRequest, opts ...grpc.CallOption) (*CreateBackupJobResponse, error) ListBackupJobs(ctx context.Context, in *ListBackupJobsRequest, opts ...grpc.CallOption) (*ListBackupJobsResponse, error) GetBackupJob(ctx context.Context, in *GetBackupJobRequest, opts ...grpc.CallOption) (*GetBackupJobResponse, error) @@ -79,6 +99,56 @@ func (c *backupServiceClient) UpdateConfig(ctx context.Context, in *UpdateConfig return out, nil } +func (c *backupServiceClient) ListSourceProfiles(ctx context.Context, in *ListSourceProfilesRequest, opts ...grpc.CallOption) (*ListSourceProfilesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(ListSourceProfilesResponse) + err := c.cc.Invoke(ctx, BackupService_ListSourceProfiles_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) CreateSourceProfile(ctx context.Context, in *CreateSourceProfileRequest, opts ...grpc.CallOption) (*CreateSourceProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateSourceProfileResponse) + err := c.cc.Invoke(ctx, BackupService_CreateSourceProfile_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) UpdateSourceProfile(ctx context.Context, in *UpdateSourceProfileRequest, opts ...grpc.CallOption) (*UpdateSourceProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UpdateSourceProfileResponse) + err := c.cc.Invoke(ctx, BackupService_UpdateSourceProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) DeleteSourceProfile(ctx context.Context, in *DeleteSourceProfileRequest, opts ...grpc.CallOption) (*DeleteSourceProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(DeleteSourceProfileResponse) + err := c.cc.Invoke(ctx, BackupService_DeleteSourceProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) SetActiveSourceProfile(ctx context.Context, in *SetActiveSourceProfileRequest, opts ...grpc.CallOption) (*SetActiveSourceProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SetActiveSourceProfileResponse) + err := c.cc.Invoke(ctx, BackupService_SetActiveSourceProfile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *backupServiceClient) ValidateS3(ctx context.Context, in *ValidateS3Request, opts ...grpc.CallOption) (*ValidateS3Response, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(ValidateS3Response) @@ -89,6 +159,56 @@ func (c *backupServiceClient) ValidateS3(ctx context.Context, in *ValidateS3Requ return out, nil } +func (c *backupServiceClient) ListS3Profiles(ctx context.Context, in *ListS3ProfilesRequest, opts ...grpc.CallOption) (*ListS3ProfilesResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
+ out := new(ListS3ProfilesResponse) + err := c.cc.Invoke(ctx, BackupService_ListS3Profiles_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) CreateS3Profile(ctx context.Context, in *CreateS3ProfileRequest, opts ...grpc.CallOption) (*CreateS3ProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(CreateS3ProfileResponse) + err := c.cc.Invoke(ctx, BackupService_CreateS3Profile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) UpdateS3Profile(ctx context.Context, in *UpdateS3ProfileRequest, opts ...grpc.CallOption) (*UpdateS3ProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(UpdateS3ProfileResponse) + err := c.cc.Invoke(ctx, BackupService_UpdateS3Profile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) DeleteS3Profile(ctx context.Context, in *DeleteS3ProfileRequest, opts ...grpc.CallOption) (*DeleteS3ProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(DeleteS3ProfileResponse) + err := c.cc.Invoke(ctx, BackupService_DeleteS3Profile_FullMethodName, in, out, cOpts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *backupServiceClient) SetActiveS3Profile(ctx context.Context, in *SetActiveS3ProfileRequest, opts ...grpc.CallOption) (*SetActiveS3ProfileResponse, error) { + cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) + out := new(SetActiveS3ProfileResponse) + err := c.cc.Invoke(ctx, BackupService_SetActiveS3Profile_FullMethodName, in, out, cOpts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + func (c *backupServiceClient) CreateBackupJob(ctx context.Context, in *CreateBackupJobRequest, opts ...grpc.CallOption) (*CreateBackupJobResponse, error) { cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) out := new(CreateBackupJobResponse) @@ -126,7 +246,17 @@ type BackupServiceServer interface { Health(context.Context, *HealthRequest) (*HealthResponse, error) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) + ListSourceProfiles(context.Context, *ListSourceProfilesRequest) (*ListSourceProfilesResponse, error) + CreateSourceProfile(context.Context, *CreateSourceProfileRequest) (*CreateSourceProfileResponse, error) + UpdateSourceProfile(context.Context, *UpdateSourceProfileRequest) (*UpdateSourceProfileResponse, error) + DeleteSourceProfile(context.Context, *DeleteSourceProfileRequest) (*DeleteSourceProfileResponse, error) + SetActiveSourceProfile(context.Context, *SetActiveSourceProfileRequest) (*SetActiveSourceProfileResponse, error) ValidateS3(context.Context, *ValidateS3Request) (*ValidateS3Response, error) + ListS3Profiles(context.Context, *ListS3ProfilesRequest) (*ListS3ProfilesResponse, error) + CreateS3Profile(context.Context, *CreateS3ProfileRequest) (*CreateS3ProfileResponse, error) + UpdateS3Profile(context.Context, *UpdateS3ProfileRequest) (*UpdateS3ProfileResponse, error) + DeleteS3Profile(context.Context, *DeleteS3ProfileRequest) (*DeleteS3ProfileResponse, error) + SetActiveS3Profile(context.Context, *SetActiveS3ProfileRequest) (*SetActiveS3ProfileResponse, error) CreateBackupJob(context.Context, *CreateBackupJobRequest) (*CreateBackupJobResponse, error) ListBackupJobs(context.Context, *ListBackupJobsRequest) (*ListBackupJobsResponse, error) GetBackupJob(context.Context, *GetBackupJobRequest) (*GetBackupJobResponse, error) @@ -149,9 +279,39 @@ func 
(UnimplementedBackupServiceServer) GetConfig(context.Context, *GetConfigReq func (UnimplementedBackupServiceServer) UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) { return nil, status.Error(codes.Unimplemented, "method UpdateConfig not implemented") } +func (UnimplementedBackupServiceServer) ListSourceProfiles(context.Context, *ListSourceProfilesRequest) (*ListSourceProfilesResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ListSourceProfiles not implemented") +} +func (UnimplementedBackupServiceServer) CreateSourceProfile(context.Context, *CreateSourceProfileRequest) (*CreateSourceProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method CreateSourceProfile not implemented") +} +func (UnimplementedBackupServiceServer) UpdateSourceProfile(context.Context, *UpdateSourceProfileRequest) (*UpdateSourceProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method UpdateSourceProfile not implemented") +} +func (UnimplementedBackupServiceServer) DeleteSourceProfile(context.Context, *DeleteSourceProfileRequest) (*DeleteSourceProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method DeleteSourceProfile not implemented") +} +func (UnimplementedBackupServiceServer) SetActiveSourceProfile(context.Context, *SetActiveSourceProfileRequest) (*SetActiveSourceProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method SetActiveSourceProfile not implemented") +} func (UnimplementedBackupServiceServer) ValidateS3(context.Context, *ValidateS3Request) (*ValidateS3Response, error) { return nil, status.Error(codes.Unimplemented, "method ValidateS3 not implemented") } +func (UnimplementedBackupServiceServer) ListS3Profiles(context.Context, *ListS3ProfilesRequest) (*ListS3ProfilesResponse, error) { + return nil, status.Error(codes.Unimplemented, "method ListS3Profiles not implemented") +} +func (UnimplementedBackupServiceServer) 
CreateS3Profile(context.Context, *CreateS3ProfileRequest) (*CreateS3ProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method CreateS3Profile not implemented") +} +func (UnimplementedBackupServiceServer) UpdateS3Profile(context.Context, *UpdateS3ProfileRequest) (*UpdateS3ProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method UpdateS3Profile not implemented") +} +func (UnimplementedBackupServiceServer) DeleteS3Profile(context.Context, *DeleteS3ProfileRequest) (*DeleteS3ProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method DeleteS3Profile not implemented") +} +func (UnimplementedBackupServiceServer) SetActiveS3Profile(context.Context, *SetActiveS3ProfileRequest) (*SetActiveS3ProfileResponse, error) { + return nil, status.Error(codes.Unimplemented, "method SetActiveS3Profile not implemented") +} func (UnimplementedBackupServiceServer) CreateBackupJob(context.Context, *CreateBackupJobRequest) (*CreateBackupJobResponse, error) { return nil, status.Error(codes.Unimplemented, "method CreateBackupJob not implemented") } @@ -236,6 +396,96 @@ func _BackupService_UpdateConfig_Handler(srv interface{}, ctx context.Context, d return interceptor(ctx, in, info, handler) } +func _BackupService_ListSourceProfiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListSourceProfilesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).ListSourceProfiles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_ListSourceProfiles_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).ListSourceProfiles(ctx, req.(*ListSourceProfilesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_BackupService_CreateSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateSourceProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).CreateSourceProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_CreateSourceProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).CreateSourceProfile(ctx, req.(*CreateSourceProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_UpdateSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateSourceProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).UpdateSourceProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_UpdateSourceProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).UpdateSourceProfile(ctx, req.(*UpdateSourceProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_DeleteSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteSourceProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).DeleteSourceProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_DeleteSourceProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(BackupServiceServer).DeleteSourceProfile(ctx, req.(*DeleteSourceProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_SetActiveSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetActiveSourceProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).SetActiveSourceProfile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_SetActiveSourceProfile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).SetActiveSourceProfile(ctx, req.(*SetActiveSourceProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _BackupService_ValidateS3_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(ValidateS3Request) if err := dec(in); err != nil { @@ -254,6 +504,96 @@ func _BackupService_ValidateS3_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _BackupService_ListS3Profiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListS3ProfilesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).ListS3Profiles(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_ListS3Profiles_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).ListS3Profiles(ctx, req.(*ListS3ProfilesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_CreateS3Profile_Handler(srv interface{}, ctx 
context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateS3ProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).CreateS3Profile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_CreateS3Profile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).CreateS3Profile(ctx, req.(*CreateS3ProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_UpdateS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateS3ProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).UpdateS3Profile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_UpdateS3Profile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).UpdateS3Profile(ctx, req.(*UpdateS3ProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _BackupService_DeleteS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteS3ProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).DeleteS3Profile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_DeleteS3Profile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).DeleteS3Profile(ctx, req.(*DeleteS3ProfileRequest)) + } + return interceptor(ctx, in, info, handler) 
+} + +func _BackupService_SetActiveS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetActiveS3ProfileRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(BackupServiceServer).SetActiveS3Profile(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: BackupService_SetActiveS3Profile_FullMethodName, + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(BackupServiceServer).SetActiveS3Profile(ctx, req.(*SetActiveS3ProfileRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _BackupService_CreateBackupJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateBackupJobRequest) if err := dec(in); err != nil { @@ -327,10 +667,50 @@ var BackupService_ServiceDesc = grpc.ServiceDesc{ MethodName: "UpdateConfig", Handler: _BackupService_UpdateConfig_Handler, }, + { + MethodName: "ListSourceProfiles", + Handler: _BackupService_ListSourceProfiles_Handler, + }, + { + MethodName: "CreateSourceProfile", + Handler: _BackupService_CreateSourceProfile_Handler, + }, + { + MethodName: "UpdateSourceProfile", + Handler: _BackupService_UpdateSourceProfile_Handler, + }, + { + MethodName: "DeleteSourceProfile", + Handler: _BackupService_DeleteSourceProfile_Handler, + }, + { + MethodName: "SetActiveSourceProfile", + Handler: _BackupService_SetActiveSourceProfile_Handler, + }, { MethodName: "ValidateS3", Handler: _BackupService_ValidateS3_Handler, }, + { + MethodName: "ListS3Profiles", + Handler: _BackupService_ListS3Profiles_Handler, + }, + { + MethodName: "CreateS3Profile", + Handler: _BackupService_CreateS3Profile_Handler, + }, + { + MethodName: "UpdateS3Profile", + Handler: _BackupService_UpdateS3Profile_Handler, + }, + { + MethodName: 
"DeleteS3Profile", + Handler: _BackupService_DeleteS3Profile_Handler, + }, + { + MethodName: "SetActiveS3Profile", + Handler: _BackupService_SetActiveS3Profile_Handler, + }, { MethodName: "CreateBackupJob", Handler: _BackupService_CreateBackupJob_Handler, diff --git a/frontend/src/api/admin/dataManagement.ts b/frontend/src/api/admin/dataManagement.ts index d58b0a11f..cec714467 100644 --- a/frontend/src/api/admin/dataManagement.ts +++ b/frontend/src/api/admin/dataManagement.ts @@ -55,11 +55,40 @@ export interface DataManagementConfig { sqlite_path?: string retention_days: number keep_last: number + active_postgres_profile_id?: string + active_redis_profile_id?: string + active_s3_profile_id?: string postgres: DataManagementPostgresConfig redis: DataManagementRedisConfig s3: DataManagementS3Config } +export type SourceType = 'postgres' | 'redis' + +export interface DataManagementSourceConfig { + host: string + port: number + user: string + password?: string + database: string + ssl_mode: string + addr: string + username: string + db: number + container_name: string +} + +export interface DataManagementSourceProfile { + source_type: SourceType + profile_id: string + name: string + is_active: boolean + password_configured?: boolean + config: DataManagementSourceConfig + created_at?: string + updated_at?: string +} + export interface TestS3Request { endpoint: string region: string @@ -79,6 +108,9 @@ export interface TestS3Response { export interface CreateBackupJobRequest { backup_type: BackupType upload_to_s3?: boolean + s3_profile_id?: string + postgres_profile_id?: string + redis_profile_id?: string idempotency_key?: string } @@ -104,6 +136,9 @@ export interface BackupJob { backup_type: BackupType status: BackupJobStatus triggered_by: string + s3_profile_id?: string + postgres_profile_id?: string + redis_profile_id?: string started_at?: string finished_at?: string error_message?: string @@ -111,6 +146,64 @@ export interface BackupJob { s3?: BackupS3Info } +export 
interface ListSourceProfilesResponse { + items: DataManagementSourceProfile[] +} + +export interface CreateSourceProfileRequest { + profile_id: string + name: string + config: DataManagementSourceConfig + set_active?: boolean +} + +export interface UpdateSourceProfileRequest { + name: string + config: DataManagementSourceConfig +} + +export interface DataManagementS3Profile { + profile_id: string + name: string + is_active: boolean + s3: DataManagementS3Config + secret_access_key_configured?: boolean + created_at?: string + updated_at?: string +} + +export interface ListS3ProfilesResponse { + items: DataManagementS3Profile[] +} + +export interface CreateS3ProfileRequest { + profile_id: string + name: string + enabled: boolean + endpoint: string + region: string + bucket: string + access_key_id: string + secret_access_key?: string + prefix?: string + force_path_style?: boolean + use_ssl?: boolean + set_active?: boolean +} + +export interface UpdateS3ProfileRequest { + name: string + enabled: boolean + endpoint: string + region: string + bucket: string + access_key_id: string + secret_access_key?: string + prefix?: string + force_path_style?: boolean + use_ssl?: boolean +} + export interface ListBackupJobsRequest { page_size?: number page_token?: string @@ -143,6 +236,54 @@ export async function testS3(request: TestS3Request): Promise { return data } +export async function listSourceProfiles(sourceType: SourceType): Promise { + const { data } = await apiClient.get(`/admin/data-management/sources/${sourceType}/profiles`) + return data +} + +export async function createSourceProfile(sourceType: SourceType, request: CreateSourceProfileRequest): Promise { + const { data } = await apiClient.post(`/admin/data-management/sources/${sourceType}/profiles`, request) + return data +} + +export async function updateSourceProfile(sourceType: SourceType, profileID: string, request: UpdateSourceProfileRequest): Promise { + const { data } = await 
apiClient.put(`/admin/data-management/sources/${sourceType}/profiles/${profileID}`, request) + return data +} + +export async function deleteSourceProfile(sourceType: SourceType, profileID: string): Promise { + await apiClient.delete(`/admin/data-management/sources/${sourceType}/profiles/${profileID}`) +} + +export async function setActiveSourceProfile(sourceType: SourceType, profileID: string): Promise { + const { data } = await apiClient.post(`/admin/data-management/sources/${sourceType}/profiles/${profileID}/activate`) + return data +} + +export async function listS3Profiles(): Promise { + const { data } = await apiClient.get('/admin/data-management/s3/profiles') + return data +} + +export async function createS3Profile(request: CreateS3ProfileRequest): Promise { + const { data } = await apiClient.post('/admin/data-management/s3/profiles', request) + return data +} + +export async function updateS3Profile(profileID: string, request: UpdateS3ProfileRequest): Promise { + const { data } = await apiClient.put(`/admin/data-management/s3/profiles/${profileID}`, request) + return data +} + +export async function deleteS3Profile(profileID: string): Promise { + await apiClient.delete(`/admin/data-management/s3/profiles/${profileID}`) +} + +export async function setActiveS3Profile(profileID: string): Promise { + const { data } = await apiClient.post(`/admin/data-management/s3/profiles/${profileID}/activate`) + return data +} + export async function createBackupJob(request: CreateBackupJobRequest): Promise { const headers = request.idempotency_key ? 
{ 'X-Idempotency-Key': request.idempotency_key } @@ -172,7 +313,17 @@ export const dataManagementAPI = { getAgentHealth, getConfig, updateConfig, + listSourceProfiles, + createSourceProfile, + updateSourceProfile, + deleteSourceProfile, + setActiveSourceProfile, testS3, + listS3Profiles, + createS3Profile, + updateS3Profile, + deleteS3Profile, + setActiveS3Profile, createBackupJob, listBackupJobs, getBackupJob diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index c4380f9a1..838a4a2e0 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -880,11 +880,22 @@ export default { form: { sourceMode: 'Source Mode', backupRoot: 'Backup Root', + activePostgresProfile: 'Active PostgreSQL Profile', + activeRedisProfile: 'Active Redis Profile', + activeS3Profile: 'Active S3 Profile', retentionDays: 'Retention Days', keepLast: 'Keep Last Jobs', uploadToS3: 'Upload to S3', + useActivePostgresProfile: 'Use Active PostgreSQL Profile', + useActiveRedisProfile: 'Use Active Redis Profile', + useActiveS3Profile: 'Use Active Profile', idempotencyKey: 'Idempotency Key (Optional)', secretConfigured: 'Configured already, leave empty to keep unchanged', + source: { + profileID: 'Profile ID (Unique)', + profileName: 'Profile Name', + setActive: 'Set as active after creation' + }, postgres: { title: 'PostgreSQL', host: 'Host', @@ -905,6 +916,8 @@ export default { }, s3: { enabled: 'Enable S3 Upload', + profileID: 'Profile ID (Unique)', + profileName: 'Profile Name', endpoint: 'Endpoint (Optional)', region: 'Region', bucket: 'Bucket', @@ -912,7 +925,36 @@ export default { secretAccessKey: 'Secret Access Key', prefix: 'Object Prefix', forcePathStyle: 'Force Path Style', - useSSL: 'Use SSL' + useSSL: 'Use SSL', + setActive: 'Set as active after creation' + } + }, + sourceProfiles: { + createTitle: 'Create Source Profile', + editTitle: 'Edit Source Profile', + empty: 'No source profiles yet, create one first', + deleteConfirm: 'Delete 
source profile {profileID}?', + columns: { + profile: 'Profile', + active: 'Active', + connection: 'Connection', + database: 'Database', + updatedAt: 'Updated At', + actions: 'Actions' + } + }, + s3Profiles: { + createTitle: 'Create S3 Profile', + editTitle: 'Edit S3 Profile', + empty: 'No S3 profiles yet, create one first', + editHint: 'Click "Edit" to modify profile details in the right drawer.', + deleteConfirm: 'Delete S3 profile {profileID}?', + columns: { + profile: 'Profile', + active: 'Active', + storage: 'Storage', + updatedAt: 'Updated At', + actions: 'Actions' } }, history: { @@ -923,6 +965,9 @@ export default { type: 'Type', status: 'Status', triggeredBy: 'Triggered By', + pgProfile: 'PostgreSQL Profile', + redisProfile: 'Redis Profile', + s3Profile: 'S3 Profile', finishedAt: 'Finished At', artifact: 'Artifact', error: 'Error' @@ -939,11 +984,28 @@ export default { refresh: 'Refresh Status', disabledHint: 'Start backupd first and ensure the socket is reachable.', reloadConfig: 'Reload Config', + reloadSourceProfiles: 'Reload Source Profiles', + reloadProfiles: 'Reload Profiles', + newSourceProfile: 'New Source Profile', saveConfig: 'Save Config', configSaved: 'Configuration saved', testS3: 'Test S3 Connection', s3TestOK: 'S3 connection test succeeded', s3TestFailed: 'S3 connection test failed', + newProfile: 'New Profile', + saveProfile: 'Save Profile', + activateProfile: 'Activate', + profileIDRequired: 'Profile ID is required', + profileNameRequired: 'Profile name is required', + profileSelectRequired: 'Select a profile to edit first', + profileCreated: 'S3 profile created', + profileSaved: 'S3 profile saved', + profileActivated: 'S3 profile activated', + profileDeleted: 'S3 profile deleted', + sourceProfileCreated: 'Source profile created', + sourceProfileSaved: 'Source profile saved', + sourceProfileActivated: 'Source profile activated', + sourceProfileDeleted: 'Source profile deleted', createBackup: 'Create Backup Job', jobCreated: 'Backup job 
created: {jobID} ({status})', refreshJobs: 'Refresh Jobs', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 4593193ec..a3f30cea6 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -903,11 +903,22 @@ export default { form: { sourceMode: '源模式', backupRoot: '备份根目录', + activePostgresProfile: '当前激活 PostgreSQL 配置', + activeRedisProfile: '当前激活 Redis 配置', + activeS3Profile: '当前激活 S3 账号', retentionDays: '保留天数', keepLast: '至少保留最近任务数', uploadToS3: '上传到 S3', + useActivePostgresProfile: '使用当前激活 PostgreSQL 配置', + useActiveRedisProfile: '使用当前激活 Redis 配置', + useActiveS3Profile: '使用当前激活账号', idempotencyKey: '幂等键(可选)', secretConfigured: '已配置,留空不变', + source: { + profileID: '配置 ID(唯一)', + profileName: '配置名称', + setActive: '创建后立即设为激活配置' + }, postgres: { title: 'PostgreSQL', host: '主机', @@ -928,6 +939,8 @@ export default { }, s3: { enabled: '启用 S3 上传', + profileID: '账号 ID(唯一)', + profileName: '账号名称', endpoint: 'Endpoint(可选)', region: 'Region', bucket: 'Bucket', @@ -935,7 +948,36 @@ export default { secretAccessKey: 'Secret Access Key', prefix: '对象前缀', forcePathStyle: '强制 path-style', - useSSL: '使用 SSL' + useSSL: '使用 SSL', + setActive: '创建后立即设为激活账号' + } + }, + sourceProfiles: { + createTitle: '创建数据源配置', + editTitle: '编辑数据源配置', + empty: '暂无配置,请先创建', + deleteConfirm: '确定删除配置 {profileID} 吗?', + columns: { + profile: '配置', + active: '激活状态', + connection: '连接信息', + database: '数据库', + updatedAt: '更新时间', + actions: '操作' + } + }, + s3Profiles: { + createTitle: '创建 S3 账号', + editTitle: '编辑 S3 账号', + empty: '暂无 S3 账号,请先创建', + editHint: '点击“编辑”将在右侧抽屉中修改账号信息。', + deleteConfirm: '确定删除 S3 账号 {profileID} 吗?', + columns: { + profile: '账号', + active: '激活状态', + storage: '存储配置', + updatedAt: '更新时间', + actions: '操作' } }, history: { @@ -946,6 +988,9 @@ export default { type: '类型', status: '状态', triggeredBy: '触发人', + pgProfile: 'PostgreSQL 配置', + redisProfile: 'Redis 配置', + s3Profile: 'S3 账号', finishedAt: '完成时间', artifact: '产物', error: 
'错误' @@ -962,11 +1007,28 @@ export default { refresh: '刷新状态', disabledHint: '请先启动 backupd 并确认 Socket 可连通。', reloadConfig: '加载配置', + reloadSourceProfiles: '刷新数据源配置', + reloadProfiles: '刷新账号列表', + newSourceProfile: '新建数据源配置', saveConfig: '保存配置', configSaved: '配置保存成功', testS3: '测试 S3 连接', s3TestOK: 'S3 连接测试成功', s3TestFailed: 'S3 连接测试失败', + newProfile: '新建账号', + saveProfile: '保存账号', + activateProfile: '设为激活', + profileIDRequired: '请输入账号 ID', + profileNameRequired: '请输入账号名称', + profileSelectRequired: '请先选择要编辑的账号', + profileCreated: 'S3 账号创建成功', + profileSaved: 'S3 账号保存成功', + profileActivated: 'S3 账号已切换为激活', + profileDeleted: 'S3 账号删除成功', + sourceProfileCreated: '数据源配置创建成功', + sourceProfileSaved: '数据源配置保存成功', + sourceProfileActivated: '数据源配置已切换为激活', + sourceProfileDeleted: '数据源配置删除成功', createBackup: '创建备份任务', jobCreated: '备份任务已创建:{jobID}({status})', refreshJobs: '刷新任务', diff --git a/frontend/src/views/admin/DataManagementView.vue b/frontend/src/views/admin/DataManagementView.vue index ab38751ee..5fba5973a 100644 --- a/frontend/src/views/admin/DataManagementView.vue +++ b/frontend/src/views/admin/DataManagementView.vue @@ -111,87 +111,292 @@ +
+
+
+ {{ t('admin.dataManagement.form.activePostgresProfile') }}: + {{ + config.active_postgres_profile_id || '-' + }} +
+
+ {{ t('admin.dataManagement.form.activeRedisProfile') }}: + {{ + config.active_redis_profile_id || '-' + }} +
+
+ {{ t('admin.dataManagement.form.activeS3Profile') }}: + {{ + config.active_s3_profile_id || '-' + }} +
+
+ +
+
-

- {{ t('admin.dataManagement.form.postgres.title') }} -

-
- - - - - - - +
+

+ {{ t('admin.dataManagement.form.postgres.title') }} +

+
+ + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + +
{{ t('admin.dataManagement.sourceProfiles.columns.profile') }}{{ t('admin.dataManagement.sourceProfiles.columns.active') }}{{ t('admin.dataManagement.sourceProfiles.columns.connection') }}{{ t('admin.dataManagement.sourceProfiles.columns.database') }}{{ t('admin.dataManagement.sourceProfiles.columns.updatedAt') }}{{ t('admin.dataManagement.sourceProfiles.columns.actions') }}
+
{{ profile.profile_id }}
+
{{ profile.name }}
+
+ + {{ profile.is_active ? t('common.enabled') : t('common.disabled') }} + + + {{ profile.config.host || '-' }}:{{ profile.config.port || '-' }} + + {{ profile.config.database || '-' }} + {{ formatDate(profile.updated_at) }} +
+ + + +
+
+ {{ t('admin.dataManagement.sourceProfiles.empty') }} +
-

- {{ t('admin.dataManagement.form.redis.title') }} -

-
- - - - - +
+

+ {{ t('admin.dataManagement.form.redis.title') }} +

+
+ + +
+
+ +
+ + + + + + + + + + + + + + + + + + + + + + + + +
{{ t('admin.dataManagement.sourceProfiles.columns.profile') }}{{ t('admin.dataManagement.sourceProfiles.columns.active') }}{{ t('admin.dataManagement.sourceProfiles.columns.connection') }}{{ t('admin.dataManagement.sourceProfiles.columns.database') }}{{ t('admin.dataManagement.sourceProfiles.columns.updatedAt') }}{{ t('admin.dataManagement.sourceProfiles.columns.actions') }}
+
{{ profile.profile_id }}
+
{{ profile.name }}
+
+ + {{ profile.is_active ? t('common.enabled') : t('common.disabled') }} + + {{ profile.config.addr || '-' }}db={{ profile.config.db }}{{ formatDate(profile.updated_at) }} +
+ + + +
+
+ {{ t('admin.dataManagement.sourceProfiles.empty') }} +
+
-
-

- {{ t('admin.dataManagement.sections.s3.title') }} -

-
- - - - - - - - - +
+
+
+

+ {{ t('admin.dataManagement.sections.s3.title') }} +

+

+ {{ t('admin.dataManagement.sections.s3.description') }} +

-
- -
+ +
+ + + + + + + + + + + + + + + + + + + + + + +
{{ t('admin.dataManagement.s3Profiles.columns.profile') }}{{ t('admin.dataManagement.s3Profiles.columns.active') }}{{ t('admin.dataManagement.s3Profiles.columns.storage') }}{{ t('admin.dataManagement.s3Profiles.columns.updatedAt') }}{{ t('admin.dataManagement.s3Profiles.columns.actions') }}
+
{{ profile.profile_id }}
+
{{ profile.name }}
+
+ + {{ profile.is_active ? t('common.enabled') : t('common.disabled') }} + + +
{{ profile.s3.bucket || '-' }}
+
{{ profile.s3.region || '-' }}
+
{{ formatDate(profile.updated_at) }} +
+ + + +
+
+ {{ t('admin.dataManagement.s3Profiles.empty') }} +
+
+ +
+ {{ t('admin.dataManagement.s3Profiles.editHint') }} +
@@ -202,19 +407,50 @@ {{ t('admin.dataManagement.sections.backup.description') }}

-
+
+ + + + + + + + @@ -244,13 +480,16 @@
- +
+ + + @@ -266,6 +505,9 @@ + + + - @@ -291,6 +533,185 @@ + + + +
+
+ + +
+
+

+ {{ creatingSourceProfile ? t('admin.dataManagement.sourceProfiles.createTitle') : t('admin.dataManagement.sourceProfiles.editTitle') }} + · {{ sourceDrawerTypeLabel }} +

+ +
+ +
+
+ + + + + + + + +
+
+ +
+ + +
+
+
+
+ + + +
+
+ + +
+
+

+ {{ creatingProfile ? t('admin.dataManagement.s3Profiles.createTitle') : t('admin.dataManagement.s3Profiles.editTitle') }} +

+ +
+ +
+
+ + + + + + + + + + + + +
+
+ +
+ + + +
+
+
+
@@ -303,7 +724,10 @@ import { type BackupAgentHealth, type BackupJob, type BackupJobStatus, - type DataManagementConfig + type DataManagementConfig, + type DataManagementS3Profile, + type DataManagementSourceProfile, + type SourceType } from '@/api/admin/dataManagement' import { useAppStore } from '@/stores' @@ -314,8 +738,20 @@ const loading = ref(false) const loadingConfig = ref(false) const savingConfig = ref(false) const testingS3 = ref(false) +const loadingProfiles = ref(false) +const savingProfile = ref(false) +const activatingProfile = ref(false) +const deletingProfile = ref(false) +const creatingProfile = ref(false) +const profileDrawerOpen = ref(false) const creatingBackup = ref(false) const loadingJobs = ref(false) +const loadingSourceProfiles = ref(false) +const savingSourceProfile = ref(false) +const activatingSourceProfile = ref(false) +const deletingSourceProfile = ref(false) +const creatingSourceProfile = ref(false) +const sourceDrawerOpen = ref(false) const health = ref({ enabled: false, @@ -326,13 +762,58 @@ const health = ref({ const config = ref(newDefaultConfig()) const jobs = ref([]) const nextPageToken = ref('') +const s3Profiles = ref([]) +const selectedProfileID = ref('') +const postgresProfiles = ref([]) +const redisProfiles = ref([]) +const sourceDrawerType = ref('postgres') +const selectedSourceProfileID = ref('') const createForm = ref({ backup_type: 'full' as 'postgres' | 'redis' | 'full', upload_to_s3: true, + s3_profile_id: '', + postgres_profile_id: '', + redis_profile_id: '', idempotency_key: '' }) +type S3ProfileForm = { + profile_id: string + name: string + enabled: boolean + endpoint: string + region: string + bucket: string + access_key_id: string + secret_access_key: string + secret_access_key_configured: boolean + prefix: string + force_path_style: boolean + use_ssl: boolean + set_active: boolean +} + +type SourceProfileForm = { + profile_id: string + name: string + set_active: boolean + host: string + port: number + user: 
string + password: string + password_configured: boolean + database: string + ssl_mode: string + addr: string + username: string + db: number + container_name: string +} + +const profileForm = ref(newDefaultS3ProfileForm()) +const sourceForm = ref(newDefaultSourceProfileForm()) + const statusBadgeClass = computed(() => { return health.value.enabled ? 'bg-green-100 text-green-700 dark:bg-green-900/30 dark:text-green-300' @@ -362,6 +843,12 @@ const statusMessage = computed(() => { : t('admin.dataManagement.agent.disabled') }) +const sourceDrawerTypeLabel = computed(() => { + return sourceDrawerType.value === 'postgres' + ? t('admin.dataManagement.form.postgres.title') + : t('admin.dataManagement.form.redis.title') +}) + async function loadAgentHealth() { loading.value = true try { @@ -397,6 +884,13 @@ async function loadConfig() { ...result.s3 } } + + if (!createForm.value.postgres_profile_id) { + createForm.value.postgres_profile_id = config.value.active_postgres_profile_id || '' + } + if (!createForm.value.redis_profile_id) { + createForm.value.redis_profile_id = config.value.active_redis_profile_id || '' + } } catch (error) { appStore.showError((error as { message?: string })?.message || t('errors.networkError')) } finally { @@ -411,7 +905,14 @@ async function saveConfig() { savingConfig.value = true try { const updated = await dataManagementAPI.updateConfig(config.value) - config.value = { ...config.value, ...updated } + config.value = { + ...config.value, + ...updated, + postgres: { ...config.value.postgres, ...(updated.postgres || {}) }, + redis: { ...config.value.redis, ...(updated.redis || {}) }, + s3: { ...config.value.s3, ...(updated.s3 || {}) } + } + await loadS3Profiles() appStore.showSuccess(t('admin.dataManagement.actions.configSaved')) } catch (error) { appStore.showError((error as { message?: string })?.message || t('errors.networkError')) @@ -420,21 +921,189 @@ async function saveConfig() { } } -async function testS3Config() { +async function 
loadSourceProfiles() { + if (!health.value.enabled) { + return + } + loadingSourceProfiles.value = true + try { + const [pgResult, redisResult] = await Promise.all([ + dataManagementAPI.listSourceProfiles('postgres'), + dataManagementAPI.listSourceProfiles('redis') + ]) + postgresProfiles.value = pgResult.items || [] + redisProfiles.value = redisResult.items || [] + + if (sourceDrawerOpen.value && !creatingSourceProfile.value) { + syncSourceFormWithSelection() + } + + if (createForm.value.postgres_profile_id) { + const exists = postgresProfiles.value.some((item) => item.profile_id === createForm.value.postgres_profile_id) + if (!exists) { + createForm.value.postgres_profile_id = config.value.active_postgres_profile_id || '' + } + } + if (createForm.value.redis_profile_id) { + const exists = redisProfiles.value.some((item) => item.profile_id === createForm.value.redis_profile_id) + if (!exists) { + createForm.value.redis_profile_id = config.value.active_redis_profile_id || '' + } + } + } catch (error) { + appStore.showError((error as { message?: string })?.message || t('errors.networkError')) + } finally { + loadingSourceProfiles.value = false + } +} + +function startCreateSourceProfile(sourceType: SourceType) { + creatingSourceProfile.value = true + sourceDrawerType.value = sourceType + selectedSourceProfileID.value = '' + sourceForm.value = newDefaultSourceProfileForm(undefined, sourceType) + sourceDrawerOpen.value = true +} + +function editSourceProfile(sourceType: SourceType, profileID: string) { + creatingSourceProfile.value = false + sourceDrawerType.value = sourceType + selectedSourceProfileID.value = profileID + syncSourceFormWithSelection() + sourceDrawerOpen.value = true +} + +function closeSourceDrawer() { + sourceDrawerOpen.value = false + if (creatingSourceProfile.value) { + creatingSourceProfile.value = false + selectedSourceProfileID.value = '' + } +} + +function syncSourceFormWithSelection() { + const targetProfiles = sourceDrawerType.value === 
'postgres' ? postgresProfiles.value : redisProfiles.value + const profile = targetProfiles.find((item) => item.profile_id === selectedSourceProfileID.value) + sourceForm.value = newDefaultSourceProfileForm(profile, sourceDrawerType.value) +} + +async function saveSourceProfile() { + if (!health.value.enabled) { + return + } + if (!sourceForm.value.name.trim()) { + appStore.showError(t('admin.dataManagement.actions.profileNameRequired')) + return + } + if (creatingSourceProfile.value && !sourceForm.value.profile_id.trim()) { + appStore.showError(t('admin.dataManagement.actions.profileIDRequired')) + return + } + if (!creatingSourceProfile.value && !selectedSourceProfileID.value) { + appStore.showError(t('admin.dataManagement.actions.profileSelectRequired')) + return + } + + savingSourceProfile.value = true + try { + const payload = { + name: sourceForm.value.name.trim(), + config: { + host: sourceForm.value.host, + port: sourceForm.value.port, + user: sourceForm.value.user, + password: sourceForm.value.password || undefined, + database: sourceForm.value.database, + ssl_mode: sourceForm.value.ssl_mode, + addr: sourceForm.value.addr, + username: sourceForm.value.username, + db: sourceForm.value.db, + container_name: sourceForm.value.container_name + } + } + + if (creatingSourceProfile.value) { + const created = await dataManagementAPI.createSourceProfile(sourceDrawerType.value, { + profile_id: sourceForm.value.profile_id.trim(), + set_active: sourceForm.value.set_active, + ...payload + }) + selectedSourceProfileID.value = created.profile_id + creatingSourceProfile.value = false + sourceDrawerOpen.value = false + appStore.showSuccess(t('admin.dataManagement.actions.sourceProfileCreated')) + } else { + await dataManagementAPI.updateSourceProfile( + sourceDrawerType.value, + selectedSourceProfileID.value, + payload + ) + sourceDrawerOpen.value = false + appStore.showSuccess(t('admin.dataManagement.actions.sourceProfileSaved')) + } + + await Promise.all([loadConfig(), 
loadSourceProfiles()]) + } catch (error) { + appStore.showError((error as { message?: string })?.message || t('errors.networkError')) + } finally { + savingSourceProfile.value = false + } +} + +async function activateSourceProfile(sourceType: SourceType, profileID: string) { + if (!health.value.enabled) { + return + } + activatingSourceProfile.value = true + try { + await dataManagementAPI.setActiveSourceProfile(sourceType, profileID) + appStore.showSuccess(t('admin.dataManagement.actions.sourceProfileActivated')) + await Promise.all([loadConfig(), loadSourceProfiles()]) + } catch (error) { + appStore.showError((error as { message?: string })?.message || t('errors.networkError')) + } finally { + activatingSourceProfile.value = false + } +} + +async function removeSourceProfile(sourceType: SourceType, profileID: string) { + if (!health.value.enabled) { + return + } + if (!window.confirm(t('admin.dataManagement.sourceProfiles.deleteConfirm', { profileID }))) { + return + } + + deletingSourceProfile.value = true + try { + await dataManagementAPI.deleteSourceProfile(sourceType, profileID) + if (selectedSourceProfileID.value === profileID) { + selectedSourceProfileID.value = '' + } + appStore.showSuccess(t('admin.dataManagement.actions.sourceProfileDeleted')) + await Promise.all([loadConfig(), loadSourceProfiles()]) + } catch (error) { + appStore.showError((error as { message?: string })?.message || t('errors.networkError')) + } finally { + deletingSourceProfile.value = false + } +} + +async function testProfileS3Config() { if (!health.value.enabled) { return } testingS3.value = true try { const result = await dataManagementAPI.testS3({ - endpoint: config.value.s3.endpoint, - region: config.value.s3.region, - bucket: config.value.s3.bucket, - access_key_id: config.value.s3.access_key_id, - secret_access_key: config.value.s3.secret_access_key || '', - prefix: config.value.s3.prefix, - force_path_style: config.value.s3.force_path_style, - use_ssl: config.value.s3.use_ssl 
+ endpoint: profileForm.value.endpoint, + region: profileForm.value.region, + bucket: profileForm.value.bucket, + access_key_id: profileForm.value.access_key_id, + secret_access_key: profileForm.value.secret_access_key || '', + prefix: profileForm.value.prefix, + force_path_style: profileForm.value.force_path_style, + use_ssl: profileForm.value.use_ssl }) if (result.ok) { appStore.showSuccess(result.message || t('admin.dataManagement.actions.s3TestOK')) @@ -448,15 +1117,187 @@ async function testS3Config() { } } +async function loadS3Profiles() { + if (!health.value.enabled) { + return + } + loadingProfiles.value = true + try { + const result = await dataManagementAPI.listS3Profiles() + s3Profiles.value = result.items || [] + + if (!creatingProfile.value) { + const stillExists = selectedProfileID.value + ? s3Profiles.value.some((item) => item.profile_id === selectedProfileID.value) + : false + if (!stillExists) { + selectedProfileID.value = pickPreferredProfileID() + } + syncProfileFormWithSelection() + } + + if (createForm.value.s3_profile_id) { + const selectable = s3Profiles.value.some((item) => item.profile_id === createForm.value.s3_profile_id) + if (!selectable) { + createForm.value.s3_profile_id = '' + } + } + } catch (error) { + appStore.showError((error as { message?: string })?.message || t('errors.networkError')) + } finally { + loadingProfiles.value = false + } +} + +function startCreateProfile() { + creatingProfile.value = true + selectedProfileID.value = '' + profileForm.value = newDefaultS3ProfileForm() + profileDrawerOpen.value = true +} + +function editS3Profile(profileID: string) { + selectedProfileID.value = profileID + creatingProfile.value = false + syncProfileFormWithSelection() + profileDrawerOpen.value = true +} + +function closeProfileDrawer() { + profileDrawerOpen.value = false + if (creatingProfile.value) { + creatingProfile.value = false + selectedProfileID.value = pickPreferredProfileID() + syncProfileFormWithSelection() + } +} + +async 
function saveS3Profile() { + if (!health.value.enabled) { + return + } + if (!profileForm.value.name.trim()) { + appStore.showError(t('admin.dataManagement.actions.profileNameRequired')) + return + } + if (creatingProfile.value && !profileForm.value.profile_id.trim()) { + appStore.showError(t('admin.dataManagement.actions.profileIDRequired')) + return + } + if (!creatingProfile.value && !selectedProfileID.value) { + appStore.showError(t('admin.dataManagement.actions.profileSelectRequired')) + return + } + + savingProfile.value = true + try { + if (creatingProfile.value) { + const created = await dataManagementAPI.createS3Profile({ + profile_id: profileForm.value.profile_id.trim(), + name: profileForm.value.name.trim(), + enabled: profileForm.value.enabled, + endpoint: profileForm.value.endpoint, + region: profileForm.value.region, + bucket: profileForm.value.bucket, + access_key_id: profileForm.value.access_key_id, + secret_access_key: profileForm.value.secret_access_key || undefined, + prefix: profileForm.value.prefix, + force_path_style: profileForm.value.force_path_style, + use_ssl: profileForm.value.use_ssl, + set_active: profileForm.value.set_active + }) + selectedProfileID.value = created.profile_id + creatingProfile.value = false + profileDrawerOpen.value = false + appStore.showSuccess(t('admin.dataManagement.actions.profileCreated')) + } else { + await dataManagementAPI.updateS3Profile(selectedProfileID.value, { + name: profileForm.value.name.trim(), + enabled: profileForm.value.enabled, + endpoint: profileForm.value.endpoint, + region: profileForm.value.region, + bucket: profileForm.value.bucket, + access_key_id: profileForm.value.access_key_id, + secret_access_key: profileForm.value.secret_access_key || undefined, + prefix: profileForm.value.prefix, + force_path_style: profileForm.value.force_path_style, + use_ssl: profileForm.value.use_ssl + }) + profileDrawerOpen.value = false + appStore.showSuccess(t('admin.dataManagement.actions.profileSaved')) + } + 
+ await Promise.all([loadConfig(), loadS3Profiles()]) + } catch (error) { + appStore.showError((error as { message?: string })?.message || t('errors.networkError')) + } finally { + savingProfile.value = false + } +} + +async function activateS3Profile(profileID: string) { + if (!health.value.enabled) { + return + } + activatingProfile.value = true + try { + await dataManagementAPI.setActiveS3Profile(profileID) + appStore.showSuccess(t('admin.dataManagement.actions.profileActivated')) + await Promise.all([loadConfig(), loadS3Profiles()]) + } catch (error) { + appStore.showError((error as { message?: string })?.message || t('errors.networkError')) + } finally { + activatingProfile.value = false + } +} + +async function removeS3Profile(profileID: string) { + if (!health.value.enabled) { + return + } + if (!window.confirm(t('admin.dataManagement.s3Profiles.deleteConfirm', { profileID }))) { + return + } + + deletingProfile.value = true + try { + await dataManagementAPI.deleteS3Profile(profileID) + if (selectedProfileID.value === profileID) { + selectedProfileID.value = '' + } + appStore.showSuccess(t('admin.dataManagement.actions.profileDeleted')) + await Promise.all([loadConfig(), loadS3Profiles()]) + } catch (error) { + appStore.showError((error as { message?: string })?.message || t('errors.networkError')) + } finally { + deletingProfile.value = false + } +} + async function createBackup() { if (!health.value.enabled) { return } creatingBackup.value = true try { + const needPostgres = createForm.value.backup_type === 'postgres' || createForm.value.backup_type === 'full' + const needRedis = createForm.value.backup_type === 'redis' || createForm.value.backup_type === 'full' + const result = await dataManagementAPI.createBackupJob({ backup_type: createForm.value.backup_type, upload_to_s3: createForm.value.upload_to_s3, + s3_profile_id: + createForm.value.upload_to_s3 && createForm.value.s3_profile_id + ? 
createForm.value.s3_profile_id + : undefined, + postgres_profile_id: + needPostgres && createForm.value.postgres_profile_id + ? createForm.value.postgres_profile_id + : undefined, + redis_profile_id: + needRedis && createForm.value.redis_profile_id + ? createForm.value.redis_profile_id + : undefined, idempotency_key: createForm.value.idempotency_key || undefined }) appStore.showSuccess( @@ -532,12 +1373,28 @@ function statusText(status: BackupJobStatus): string { return t(key) } +function pickPreferredProfileID(): string { + const active = s3Profiles.value.find((item) => item.is_active) + if (active) { + return active.profile_id + } + return s3Profiles.value[0]?.profile_id || '' +} + +function syncProfileFormWithSelection() { + const profile = s3Profiles.value.find((item) => item.profile_id === selectedProfileID.value) + profileForm.value = newDefaultS3ProfileForm(profile) +} + function newDefaultConfig(): DataManagementConfig { return { source_mode: 'direct', backup_root: '/var/lib/sub2api/backups', retention_days: 7, keep_last: 30, + active_postgres_profile_id: '', + active_redis_profile_id: '', + active_s3_profile_id: '', postgres: { host: '127.0.0.1', port: 5432, @@ -571,10 +1428,119 @@ function newDefaultConfig(): DataManagementConfig { } } +function newDefaultS3ProfileForm(profile?: DataManagementS3Profile): S3ProfileForm { + if (!profile) { + return { + profile_id: '', + name: '', + enabled: false, + endpoint: '', + region: '', + bucket: '', + access_key_id: '', + secret_access_key: '', + secret_access_key_configured: false, + prefix: '', + force_path_style: false, + use_ssl: true, + set_active: false + } + } + + return { + profile_id: profile.profile_id, + name: profile.name, + enabled: profile.s3?.enabled || false, + endpoint: profile.s3?.endpoint || '', + region: profile.s3?.region || '', + bucket: profile.s3?.bucket || '', + access_key_id: profile.s3?.access_key_id || '', + secret_access_key: '', + secret_access_key_configured: + 
Boolean(profile.secret_access_key_configured) || Boolean(profile.s3?.secret_access_key_configured), + prefix: profile.s3?.prefix || '', + force_path_style: profile.s3?.force_path_style || false, + use_ssl: profile.s3?.use_ssl ?? true, + set_active: false + } +} + +function newDefaultSourceProfileForm(profile?: DataManagementSourceProfile, sourceType: SourceType = 'postgres'): SourceProfileForm { + if (!profile) { + return { + profile_id: '', + name: '', + set_active: false, + host: sourceType === 'postgres' ? '127.0.0.1' : '', + port: sourceType === 'postgres' ? 5432 : 0, + user: sourceType === 'postgres' ? 'postgres' : '', + password: '', + password_configured: false, + database: sourceType === 'postgres' ? 'sub2api' : '', + ssl_mode: sourceType === 'postgres' ? 'disable' : '', + addr: sourceType === 'redis' ? '127.0.0.1:6379' : '', + username: '', + db: 0, + container_name: '' + } + } + + return { + profile_id: profile.profile_id, + name: profile.name, + set_active: false, + host: profile.config.host || '', + port: profile.config.port || 0, + user: profile.config.user || '', + password: '', + password_configured: Boolean(profile.password_configured), + database: profile.config.database || '', + ssl_mode: profile.config.ssl_mode || '', + addr: profile.config.addr || '', + username: profile.config.username || '', + db: profile.config.db || 0, + container_name: profile.config.container_name || '' + } +} + onMounted(async () => { await loadAgentHealth() if (health.value.enabled) { - await Promise.all([loadConfig(), refreshJobs()]) + await Promise.all([loadConfig(), loadSourceProfiles(), loadS3Profiles(), refreshJobs()]) } }) + + From 1139c33cc3478dad55d43a82f9677b55e204e59e Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Wed, 25 Feb 2026 11:50:36 +0800 Subject: [PATCH 004/120] =?UTF-8?q?feat(openai-ws):=20=E5=BC=95=E5=85=A5WS?= =?UTF-8?q?v2=E5=8D=8F=E8=AE=AE=E9=93=BE=E8=B7=AF=E5=B9=B6=E5=AE=8C?= 
=?UTF-8?q?=E5=96=84=E5=A4=87=E4=BB=BD=E4=BB=BB=E5=8A=A1=E7=A8=B3=E5=AE=9A?= =?UTF-8?q?=E6=80=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/go.mod | 1 + backend/go.sum | 2 + backend/internal/config/config.go | 167 +++ backend/internal/config/config_test.go | 201 ++++ .../handler/openai_gateway_handler.go | 37 +- backend/internal/service/account.go | 56 + .../account_openai_passthrough_test.go | 99 ++ .../internal/service/data_management_grpc.go | 71 +- .../service/data_management_grpc_test.go | 63 + .../service/openai_account_scheduler.go | 665 +++++++++++ .../service/openai_account_scheduler_test.go | 275 +++++ .../service/openai_gateway_service.go | 147 ++- .../service/openai_ws_account_sticky_test.go | 124 ++ backend/internal/service/openai_ws_client.go | 129 ++ .../service/openai_ws_fallback_test.go | 60 + .../internal/service/openai_ws_forwarder.go | 959 +++++++++++++++ .../openai_ws_forwarder_success_test.go | 766 ++++++++++++ backend/internal/service/openai_ws_pool.go | 1064 +++++++++++++++++ .../internal/service/openai_ws_pool_test.go | 285 +++++ .../openai_ws_protocol_forward_test.go | 395 ++++++ .../service/openai_ws_protocol_resolver.go | 88 ++ .../openai_ws_protocol_resolver_test.go | 109 ++ .../internal/service/openai_ws_state_store.go | 285 +++++ .../service/openai_ws_state_store_test.go | 76 ++ .../internal/service/ops_upstream_context.go | 4 + backup/internal/store/entstore/store.go | 72 +- backup/internal/store/entstore/store_test.go | 43 + deploy/config.example.yaml | 45 + .../components/account/CreateAccountModal.vue | 67 ++ .../components/account/EditAccountModal.vue | 79 ++ frontend/src/i18n/locales/en.ts | 14 + frontend/src/i18n/locales/zh.ts | 12 + .../perf/openai_responses_ws_v2_compare_k6.js | 167 +++ tools/perf/openai_ws_pooling_compare_k6.js | 123 ++ 34 files changed, 6682 insertions(+), 68 deletions(-) create mode 100644 backend/internal/service/openai_account_scheduler.go 
create mode 100644 backend/internal/service/openai_account_scheduler_test.go create mode 100644 backend/internal/service/openai_ws_account_sticky_test.go create mode 100644 backend/internal/service/openai_ws_client.go create mode 100644 backend/internal/service/openai_ws_fallback_test.go create mode 100644 backend/internal/service/openai_ws_forwarder.go create mode 100644 backend/internal/service/openai_ws_forwarder_success_test.go create mode 100644 backend/internal/service/openai_ws_pool.go create mode 100644 backend/internal/service/openai_ws_pool_test.go create mode 100644 backend/internal/service/openai_ws_protocol_forward_test.go create mode 100644 backend/internal/service/openai_ws_protocol_resolver.go create mode 100644 backend/internal/service/openai_ws_protocol_resolver_test.go create mode 100644 backend/internal/service/openai_ws_state_store.go create mode 100644 backend/internal/service/openai_ws_state_store_test.go create mode 100644 tools/perf/openai_responses_ws_v2_compare_k6.js create mode 100644 tools/perf/openai_ws_pooling_compare_k6.js diff --git a/backend/go.mod b/backend/go.mod index 49bf4f154..9ca9eb1a8 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -53,6 +53,7 @@ require ( github.com/bytedance/sonic v1.9.1 // indirect github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect + github.com/coder/websocket v1.8.14 // indirect github.com/containerd/errdefs v1.0.0 // indirect github.com/containerd/errdefs/pkg v0.3.0 // indirect github.com/containerd/log v0.1.0 // indirect diff --git a/backend/go.sum b/backend/go.sum index 2aff32cbe..ea707b2ea 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -40,6 +40,8 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 
h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g= +github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index c4d4fdab3..939a4ae95 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -364,6 +364,8 @@ type GatewayConfig struct { // OpenAIPassthroughAllowTimeoutHeaders: OpenAI 透传模式是否放行客户端超时头 // 关闭(默认)可避免 x-stainless-timeout 等头导致上游提前断流。 OpenAIPassthroughAllowTimeoutHeaders bool `mapstructure:"openai_passthrough_allow_timeout_headers"` + // OpenAIWS: OpenAI Responses WebSocket 配置(默认关闭,不影响现有 HTTP 线路) + OpenAIWS GatewayOpenAIWSConfig `mapstructure:"openai_ws"` // HTTP 上游连接池配置(性能优化:支持高并发场景调优) // MaxIdleConns: 所有主机的最大空闲连接总数 @@ -450,6 +452,65 @@ type GatewayConfig struct { ModelsListCacheTTLSeconds int `mapstructure:"models_list_cache_ttl_seconds"` } +// GatewayOpenAIWSConfig OpenAI Responses WebSocket 配置。 +// 注意:默认全局关闭,确保现网仍走现有 HTTP/SSE 链路。 +type GatewayOpenAIWSConfig struct { + // Enabled: 全局总开关(默认 false) + Enabled bool `mapstructure:"enabled"` + // OAuthEnabled: 是否允许 OpenAI OAuth 账号使用 WS + OAuthEnabled bool `mapstructure:"oauth_enabled"` + // APIKeyEnabled: 是否允许 OpenAI API Key 账号使用 WS + APIKeyEnabled bool `mapstructure:"apikey_enabled"` + // ForceHTTP: 全局强制 HTTP(用于紧急回滚) + ForceHTTP bool `mapstructure:"force_http"` + // AllowStoreRecovery: 允许在 WSv2 下按策略恢复 store=true(默认 false) + AllowStoreRecovery bool `mapstructure:"allow_store_recovery"` + // 
PrewarmGenerateEnabled: 是否启用 WSv2 generate=false 预热(默认 false) + PrewarmGenerateEnabled bool `mapstructure:"prewarm_generate_enabled"` + + // Feature 开关:v2 优先于 v1 + ResponsesWebsockets bool `mapstructure:"responses_websockets"` + ResponsesWebsocketsV2 bool `mapstructure:"responses_websockets_v2"` + + // 连接池参数 + MaxConnsPerAccount int `mapstructure:"max_conns_per_account"` + MinIdlePerAccount int `mapstructure:"min_idle_per_account"` + MaxIdlePerAccount int `mapstructure:"max_idle_per_account"` + // DynamicMaxConnsByAccountConcurrencyEnabled: 是否按账号并发动态计算连接池上限 + DynamicMaxConnsByAccountConcurrencyEnabled bool `mapstructure:"dynamic_max_conns_by_account_concurrency_enabled"` + // OAuthMaxConnsFactor: OAuth 账号连接池系数(effective=ceil(concurrency*factor)) + OAuthMaxConnsFactor float64 `mapstructure:"oauth_max_conns_factor"` + // APIKeyMaxConnsFactor: API Key 账号连接池系数(effective=ceil(concurrency*factor)) + APIKeyMaxConnsFactor float64 `mapstructure:"apikey_max_conns_factor"` + DialTimeoutSeconds int `mapstructure:"dial_timeout_seconds"` + ReadTimeoutSeconds int `mapstructure:"read_timeout_seconds"` + WriteTimeoutSeconds int `mapstructure:"write_timeout_seconds"` + PoolTargetUtilization float64 `mapstructure:"pool_target_utilization"` + QueueLimitPerConn int `mapstructure:"queue_limit_per_conn"` + // FallbackCooldownSeconds: WS 回退冷却窗口,避免 WS/HTTP 抖动;0 表示关闭冷却 + FallbackCooldownSeconds int `mapstructure:"fallback_cooldown_seconds"` + + // 账号调度与粘连参数 + LBTopK int `mapstructure:"lb_top_k"` + // StickySessionTTLSeconds: session_hash -> account_id 粘连 TTL + StickySessionTTLSeconds int `mapstructure:"sticky_session_ttl_seconds"` + // StickyResponseIDTTLSeconds: response_id -> account_id 粘连 TTL + StickyResponseIDTTLSeconds int `mapstructure:"sticky_response_id_ttl_seconds"` + // StickyPreviousResponseTTLSeconds: 兼容旧键(当新键未设置时回退) + StickyPreviousResponseTTLSeconds int `mapstructure:"sticky_previous_response_ttl_seconds"` + + SchedulerScoreWeights GatewayOpenAIWSSchedulerScoreWeights 
`mapstructure:"scheduler_score_weights"` +} + +// GatewayOpenAIWSSchedulerScoreWeights 账号调度打分权重。 +type GatewayOpenAIWSSchedulerScoreWeights struct { + Priority float64 `mapstructure:"priority"` + Load float64 `mapstructure:"load"` + Queue float64 `mapstructure:"queue"` + ErrorRate float64 `mapstructure:"error_rate"` + TTFT float64 `mapstructure:"ttft"` +} + // GatewayUsageRecordConfig 使用量记录异步队列配置 type GatewayUsageRecordConfig struct { // WorkerCount: worker 初始数量(自动扩缩容开启时作为初始并发上限) @@ -886,6 +947,12 @@ func load(allowMissingJWTSecret bool) (*Config, error) { cfg.Log.StacktraceLevel = strings.ToLower(strings.TrimSpace(cfg.Log.StacktraceLevel)) cfg.Log.Output.FilePath = strings.TrimSpace(cfg.Log.Output.FilePath) + // 兼容旧键 gateway.openai_ws.sticky_previous_response_ttl_seconds。 + // 新键未配置(<=0)时回退旧键;新键优先。 + if cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds <= 0 && cfg.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds > 0 { + cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = cfg.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds + } + // Auto-generate TOTP encryption key if not set (32 bytes = 64 hex chars for AES-256) cfg.Totp.EncryptionKey = strings.TrimSpace(cfg.Totp.EncryptionKey) if cfg.Totp.EncryptionKey == "" { @@ -1157,6 +1224,36 @@ func setDefaults() { viper.SetDefault("gateway.max_account_switches_gemini", 3) viper.SetDefault("gateway.force_codex_cli", false) viper.SetDefault("gateway.openai_passthrough_allow_timeout_headers", false) + // OpenAI Responses WebSocket(默认关闭,不影响现网 HTTP 线路) + viper.SetDefault("gateway.openai_ws.enabled", false) + viper.SetDefault("gateway.openai_ws.oauth_enabled", true) + viper.SetDefault("gateway.openai_ws.apikey_enabled", true) + viper.SetDefault("gateway.openai_ws.force_http", false) + viper.SetDefault("gateway.openai_ws.allow_store_recovery", false) + viper.SetDefault("gateway.openai_ws.prewarm_generate_enabled", false) + viper.SetDefault("gateway.openai_ws.responses_websockets", false) + 
viper.SetDefault("gateway.openai_ws.responses_websockets_v2", true) + viper.SetDefault("gateway.openai_ws.max_conns_per_account", 8) + viper.SetDefault("gateway.openai_ws.min_idle_per_account", 1) + viper.SetDefault("gateway.openai_ws.max_idle_per_account", 4) + viper.SetDefault("gateway.openai_ws.dynamic_max_conns_by_account_concurrency_enabled", true) + viper.SetDefault("gateway.openai_ws.oauth_max_conns_factor", 1.0) + viper.SetDefault("gateway.openai_ws.apikey_max_conns_factor", 1.0) + viper.SetDefault("gateway.openai_ws.dial_timeout_seconds", 10) + viper.SetDefault("gateway.openai_ws.read_timeout_seconds", 900) + viper.SetDefault("gateway.openai_ws.write_timeout_seconds", 120) + viper.SetDefault("gateway.openai_ws.pool_target_utilization", 0.7) + viper.SetDefault("gateway.openai_ws.queue_limit_per_conn", 256) + viper.SetDefault("gateway.openai_ws.fallback_cooldown_seconds", 30) + viper.SetDefault("gateway.openai_ws.lb_top_k", 3) + viper.SetDefault("gateway.openai_ws.sticky_session_ttl_seconds", 3600) + viper.SetDefault("gateway.openai_ws.sticky_response_id_ttl_seconds", 3600) + viper.SetDefault("gateway.openai_ws.sticky_previous_response_ttl_seconds", 3600) + viper.SetDefault("gateway.openai_ws.scheduler_score_weights.priority", 1.0) + viper.SetDefault("gateway.openai_ws.scheduler_score_weights.load", 1.0) + viper.SetDefault("gateway.openai_ws.scheduler_score_weights.queue", 0.7) + viper.SetDefault("gateway.openai_ws.scheduler_score_weights.error_rate", 0.8) + viper.SetDefault("gateway.openai_ws.scheduler_score_weights.ttft", 0.5) viper.SetDefault("gateway.antigravity_fallback_cooldown_minutes", 1) viper.SetDefault("gateway.max_body_size", int64(100*1024*1024)) viper.SetDefault("gateway.upstream_response_read_max_bytes", int64(8*1024*1024)) @@ -1746,6 +1843,76 @@ func (c *Config) Validate() error { (c.Gateway.StreamKeepaliveInterval < 5 || c.Gateway.StreamKeepaliveInterval > 30) { return fmt.Errorf("gateway.stream_keepalive_interval must be 0 or between 5-30 
seconds") } + // 兼容旧键 sticky_previous_response_ttl_seconds + if c.Gateway.OpenAIWS.StickyResponseIDTTLSeconds <= 0 && c.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds > 0 { + c.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = c.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds + } + if c.Gateway.OpenAIWS.MaxConnsPerAccount <= 0 { + return fmt.Errorf("gateway.openai_ws.max_conns_per_account must be positive") + } + if c.Gateway.OpenAIWS.MinIdlePerAccount < 0 { + return fmt.Errorf("gateway.openai_ws.min_idle_per_account must be non-negative") + } + if c.Gateway.OpenAIWS.MaxIdlePerAccount < 0 { + return fmt.Errorf("gateway.openai_ws.max_idle_per_account must be non-negative") + } + if c.Gateway.OpenAIWS.MinIdlePerAccount > c.Gateway.OpenAIWS.MaxIdlePerAccount { + return fmt.Errorf("gateway.openai_ws.min_idle_per_account must be <= max_idle_per_account") + } + if c.Gateway.OpenAIWS.MaxIdlePerAccount > c.Gateway.OpenAIWS.MaxConnsPerAccount { + return fmt.Errorf("gateway.openai_ws.max_idle_per_account must be <= max_conns_per_account") + } + if c.Gateway.OpenAIWS.OAuthMaxConnsFactor <= 0 { + return fmt.Errorf("gateway.openai_ws.oauth_max_conns_factor must be positive") + } + if c.Gateway.OpenAIWS.APIKeyMaxConnsFactor <= 0 { + return fmt.Errorf("gateway.openai_ws.apikey_max_conns_factor must be positive") + } + if c.Gateway.OpenAIWS.DialTimeoutSeconds <= 0 { + return fmt.Errorf("gateway.openai_ws.dial_timeout_seconds must be positive") + } + if c.Gateway.OpenAIWS.ReadTimeoutSeconds <= 0 { + return fmt.Errorf("gateway.openai_ws.read_timeout_seconds must be positive") + } + if c.Gateway.OpenAIWS.WriteTimeoutSeconds <= 0 { + return fmt.Errorf("gateway.openai_ws.write_timeout_seconds must be positive") + } + if c.Gateway.OpenAIWS.PoolTargetUtilization <= 0 || c.Gateway.OpenAIWS.PoolTargetUtilization > 1 { + return fmt.Errorf("gateway.openai_ws.pool_target_utilization must be within (0,1]") + } + if c.Gateway.OpenAIWS.QueueLimitPerConn <= 0 { + return 
fmt.Errorf("gateway.openai_ws.queue_limit_per_conn must be positive") + } + if c.Gateway.OpenAIWS.FallbackCooldownSeconds < 0 { + return fmt.Errorf("gateway.openai_ws.fallback_cooldown_seconds must be non-negative") + } + if c.Gateway.OpenAIWS.LBTopK <= 0 { + return fmt.Errorf("gateway.openai_ws.lb_top_k must be positive") + } + if c.Gateway.OpenAIWS.StickySessionTTLSeconds <= 0 { + return fmt.Errorf("gateway.openai_ws.sticky_session_ttl_seconds must be positive") + } + if c.Gateway.OpenAIWS.StickyResponseIDTTLSeconds <= 0 { + return fmt.Errorf("gateway.openai_ws.sticky_response_id_ttl_seconds must be positive") + } + if c.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds < 0 { + return fmt.Errorf("gateway.openai_ws.sticky_previous_response_ttl_seconds must be non-negative") + } + if c.Gateway.OpenAIWS.SchedulerScoreWeights.Priority < 0 || + c.Gateway.OpenAIWS.SchedulerScoreWeights.Load < 0 || + c.Gateway.OpenAIWS.SchedulerScoreWeights.Queue < 0 || + c.Gateway.OpenAIWS.SchedulerScoreWeights.ErrorRate < 0 || + c.Gateway.OpenAIWS.SchedulerScoreWeights.TTFT < 0 { + return fmt.Errorf("gateway.openai_ws.scheduler_score_weights.* must be non-negative") + } + weightSum := c.Gateway.OpenAIWS.SchedulerScoreWeights.Priority + + c.Gateway.OpenAIWS.SchedulerScoreWeights.Load + + c.Gateway.OpenAIWS.SchedulerScoreWeights.Queue + + c.Gateway.OpenAIWS.SchedulerScoreWeights.ErrorRate + + c.Gateway.OpenAIWS.SchedulerScoreWeights.TTFT + if weightSum <= 0 { + return fmt.Errorf("gateway.openai_ws.scheduler_score_weights must not all be zero") + } if c.Gateway.MaxLineSize < 0 { return fmt.Errorf("gateway.max_line_size must be non-negative") } diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go index b0402a3b8..77edbd024 100644 --- a/backend/internal/config/config_test.go +++ b/backend/internal/config/config_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/spf13/viper" + "github.com/stretchr/testify/require" ) func resetViperWithJWTSecret(t 
*testing.T) { @@ -75,6 +76,58 @@ func TestLoadDefaultSchedulingConfig(t *testing.T) { } } +func TestLoadDefaultOpenAIWSConfig(t *testing.T) { + resetViperWithJWTSecret(t) + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if cfg.Gateway.OpenAIWS.Enabled { + t.Fatalf("Gateway.OpenAIWS.Enabled = true, want false") + } + if !cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 { + t.Fatalf("Gateway.OpenAIWS.ResponsesWebsocketsV2 = false, want true") + } + if cfg.Gateway.OpenAIWS.ResponsesWebsockets { + t.Fatalf("Gateway.OpenAIWS.ResponsesWebsockets = true, want false") + } + if !cfg.Gateway.OpenAIWS.DynamicMaxConnsByAccountConcurrencyEnabled { + t.Fatalf("Gateway.OpenAIWS.DynamicMaxConnsByAccountConcurrencyEnabled = false, want true") + } + if cfg.Gateway.OpenAIWS.OAuthMaxConnsFactor != 1.0 { + t.Fatalf("Gateway.OpenAIWS.OAuthMaxConnsFactor = %v, want 1.0", cfg.Gateway.OpenAIWS.OAuthMaxConnsFactor) + } + if cfg.Gateway.OpenAIWS.APIKeyMaxConnsFactor != 1.0 { + t.Fatalf("Gateway.OpenAIWS.APIKeyMaxConnsFactor = %v, want 1.0", cfg.Gateway.OpenAIWS.APIKeyMaxConnsFactor) + } + if cfg.Gateway.OpenAIWS.StickySessionTTLSeconds != 3600 { + t.Fatalf("Gateway.OpenAIWS.StickySessionTTLSeconds = %d, want 3600", cfg.Gateway.OpenAIWS.StickySessionTTLSeconds) + } + if cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds != 3600 { + t.Fatalf("Gateway.OpenAIWS.StickyResponseIDTTLSeconds = %d, want 3600", cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds) + } + if cfg.Gateway.OpenAIWS.FallbackCooldownSeconds != 30 { + t.Fatalf("Gateway.OpenAIWS.FallbackCooldownSeconds = %d, want 30", cfg.Gateway.OpenAIWS.FallbackCooldownSeconds) + } +} + +func TestLoadOpenAIWSStickyTTLCompatibility(t *testing.T) { + resetViperWithJWTSecret(t) + t.Setenv("GATEWAY_OPENAI_WS_STICKY_RESPONSE_ID_TTL_SECONDS", "0") + t.Setenv("GATEWAY_OPENAI_WS_STICKY_PREVIOUS_RESPONSE_TTL_SECONDS", "7200") + + cfg, err := Load() + if err != nil { + t.Fatalf("Load() error: %v", err) + } + + if 
cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds != 7200 { + t.Fatalf("StickyResponseIDTTLSeconds = %d, want 7200", cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds) + } +} + func TestLoadDefaultIdempotencyConfig(t *testing.T) { resetViperWithJWTSecret(t) @@ -993,6 +1046,16 @@ func TestValidateConfigErrors(t *testing.T) { mutate: func(c *Config) { c.Gateway.StreamKeepaliveInterval = 4 }, wantErr: "gateway.stream_keepalive_interval", }, + { + name: "gateway openai ws oauth max conns factor", + mutate: func(c *Config) { c.Gateway.OpenAIWS.OAuthMaxConnsFactor = 0 }, + wantErr: "gateway.openai_ws.oauth_max_conns_factor", + }, + { + name: "gateway openai ws apikey max conns factor", + mutate: func(c *Config) { c.Gateway.OpenAIWS.APIKeyMaxConnsFactor = 0 }, + wantErr: "gateway.openai_ws.apikey_max_conns_factor", + }, { name: "gateway stream data interval range", mutate: func(c *Config) { c.Gateway.StreamDataIntervalTimeout = 5 }, @@ -1174,6 +1237,144 @@ func TestValidateConfigErrors(t *testing.T) { } } +func TestValidateConfig_OpenAIWSRules(t *testing.T) { + buildValid := func(t *testing.T) *Config { + t.Helper() + resetViperWithJWTSecret(t) + cfg, err := Load() + require.NoError(t, err) + return cfg + } + + t.Run("sticky response id ttl 兼容旧键回填", func(t *testing.T) { + cfg := buildValid(t) + cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = 0 + cfg.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds = 7200 + + require.NoError(t, cfg.Validate()) + require.Equal(t, 7200, cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds) + }) + + cases := []struct { + name string + mutate func(*Config) + wantErr string + }{ + { + name: "max_conns_per_account 必须为正数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.MaxConnsPerAccount = 0 }, + wantErr: "gateway.openai_ws.max_conns_per_account", + }, + { + name: "min_idle_per_account 不能为负数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.MinIdlePerAccount = -1 }, + wantErr: "gateway.openai_ws.min_idle_per_account", + }, + { + name: 
"max_idle_per_account 不能为负数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.MaxIdlePerAccount = -1 }, + wantErr: "gateway.openai_ws.max_idle_per_account", + }, + { + name: "min_idle_per_account 不能大于 max_idle_per_account", + mutate: func(c *Config) { + c.Gateway.OpenAIWS.MinIdlePerAccount = 3 + c.Gateway.OpenAIWS.MaxIdlePerAccount = 2 + }, + wantErr: "gateway.openai_ws.min_idle_per_account must be <= max_idle_per_account", + }, + { + name: "max_idle_per_account 不能大于 max_conns_per_account", + mutate: func(c *Config) { + c.Gateway.OpenAIWS.MaxConnsPerAccount = 2 + c.Gateway.OpenAIWS.MaxIdlePerAccount = 3 + }, + wantErr: "gateway.openai_ws.max_idle_per_account must be <= max_conns_per_account", + }, + { + name: "dial_timeout_seconds 必须为正数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.DialTimeoutSeconds = 0 }, + wantErr: "gateway.openai_ws.dial_timeout_seconds", + }, + { + name: "read_timeout_seconds 必须为正数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.ReadTimeoutSeconds = 0 }, + wantErr: "gateway.openai_ws.read_timeout_seconds", + }, + { + name: "write_timeout_seconds 必须为正数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.WriteTimeoutSeconds = 0 }, + wantErr: "gateway.openai_ws.write_timeout_seconds", + }, + { + name: "pool_target_utilization 必须在 (0,1]", + mutate: func(c *Config) { c.Gateway.OpenAIWS.PoolTargetUtilization = 0 }, + wantErr: "gateway.openai_ws.pool_target_utilization", + }, + { + name: "queue_limit_per_conn 必须为正数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.QueueLimitPerConn = 0 }, + wantErr: "gateway.openai_ws.queue_limit_per_conn", + }, + { + name: "fallback_cooldown_seconds 不能为负数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.FallbackCooldownSeconds = -1 }, + wantErr: "gateway.openai_ws.fallback_cooldown_seconds", + }, + { + name: "lb_top_k 必须为正数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.LBTopK = 0 }, + wantErr: "gateway.openai_ws.lb_top_k", + }, + { + name: "sticky_session_ttl_seconds 必须为正数", + mutate: func(c *Config) { 
c.Gateway.OpenAIWS.StickySessionTTLSeconds = 0 }, + wantErr: "gateway.openai_ws.sticky_session_ttl_seconds", + }, + { + name: "sticky_response_id_ttl_seconds 必须为正数", + mutate: func(c *Config) { + c.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = 0 + c.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds = 0 + }, + wantErr: "gateway.openai_ws.sticky_response_id_ttl_seconds", + }, + { + name: "sticky_previous_response_ttl_seconds 不能为负数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.StickyPreviousResponseTTLSeconds = -1 }, + wantErr: "gateway.openai_ws.sticky_previous_response_ttl_seconds", + }, + { + name: "scheduler_score_weights 不能为负数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.SchedulerScoreWeights.Queue = -0.1 }, + wantErr: "gateway.openai_ws.scheduler_score_weights.* must be non-negative", + }, + { + name: "scheduler_score_weights 不能全为 0", + mutate: func(c *Config) { + c.Gateway.OpenAIWS.SchedulerScoreWeights.Priority = 0 + c.Gateway.OpenAIWS.SchedulerScoreWeights.Load = 0 + c.Gateway.OpenAIWS.SchedulerScoreWeights.Queue = 0 + c.Gateway.OpenAIWS.SchedulerScoreWeights.ErrorRate = 0 + c.Gateway.OpenAIWS.SchedulerScoreWeights.TTFT = 0 + }, + wantErr: "gateway.openai_ws.scheduler_score_weights must not all be zero", + }, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + cfg := buildValid(t) + tc.mutate(cfg) + + err := cfg.Validate() + require.Error(t, err) + require.Contains(t, err.Error(), tc.wantErr) + }) + } +} + func TestValidateConfig_AutoScaleDisabledIgnoreAutoScaleFields(t *testing.T) { resetViperWithJWTSecret(t) cfg, err := Load() diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index a1c240c77..cdbf5662e 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -133,6 +133,10 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { } reqStream := streamResult.Bool() reqLog = 
reqLog.With(zap.String("model", reqModel), zap.Bool("stream", reqStream)) + previousResponseID := strings.TrimSpace(gjson.GetBytes(body, "previous_response_id").String()) + if previousResponseID != "" { + reqLog = reqLog.With(zap.Bool("has_previous_response_id", true)) + } setOpsRequestContext(c, reqModel, reqStream, body) @@ -246,7 +250,14 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { for { // Select account supporting the requested model reqLog.Debug("openai.account_selecting", zap.Int("excluded_account_count", len(failedAccountIDs))) - selection, err := h.gatewayService.SelectAccountWithLoadAwareness(c.Request.Context(), apiKey.GroupID, sessionHash, reqModel, failedAccountIDs) + selection, scheduleDecision, err := h.gatewayService.SelectAccountWithScheduler( + c.Request.Context(), + apiKey.GroupID, + previousResponseID, + sessionHash, + reqModel, + failedAccountIDs, + ) if err != nil { reqLog.Warn("openai.account_select_failed", zap.Error(err), @@ -263,6 +274,22 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { } return } + if selection == nil || selection.Account == nil { + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) + return + } + if previousResponseID != "" && selection != nil && selection.Account != nil { + reqLog.Debug("openai.account_selected_with_previous_response_id", zap.Int64("account_id", selection.Account.ID)) + } + reqLog.Debug("openai.account_schedule_decision", + zap.String("layer", scheduleDecision.Layer), + zap.Bool("sticky_previous_hit", scheduleDecision.StickyPreviousHit), + zap.Bool("sticky_session_hit", scheduleDecision.StickySessionHit), + zap.Int("candidate_count", scheduleDecision.CandidateCount), + zap.Int("top_k", scheduleDecision.TopK), + zap.Int64("latency_ms", scheduleDecision.LatencyMs), + zap.Float64("load_skew", scheduleDecision.LoadSkew), + ) account := selection.Account reqLog.Debug("openai.account_selected", 
zap.Int64("account_id", account.ID), zap.String("account_name", account.Name)) setOpsSelectedAccount(c, account.ID, account.Platform) @@ -358,6 +385,8 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { if err != nil { var failoverErr *service.UpstreamFailoverError if errors.As(err, &failoverErr) { + h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, false, nil) + h.gatewayService.RecordOpenAIAccountSwitch() failedAccountIDs[account.ID] = struct{}{} lastFailoverErr = failoverErr if switchCount >= maxAccountSwitches { @@ -373,6 +402,7 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { ) continue } + h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, false, nil) wroteFallback := h.ensureForwardErrorResponse(c, streamStarted) reqLog.Error("openai.forward_failed", zap.Int64("account_id", account.ID), @@ -381,6 +411,11 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { ) return } + if result != nil { + h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, true, result.FirstTokenMs) + } else { + h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, true, nil) + } // 捕获请求信息(用于异步记录,避免在 goroutine 中访问 gin.Context) userAgent := c.GetHeader("User-Agent") diff --git a/backend/internal/service/account.go b/backend/internal/service/account.go index 51ab84dd4..1114c4502 100644 --- a/backend/internal/service/account.go +++ b/backend/internal/service/account.go @@ -714,6 +714,62 @@ func (a *Account) IsOpenAIPassthroughEnabled() bool { return false } +// IsOpenAIResponsesWebSocketV2Enabled 返回 OpenAI 账号是否开启 Responses WebSocket v2。 +// +// 分类型新字段: +// - OAuth 账号:accounts.extra.openai_oauth_responses_websockets_v2_enabled +// - API Key 账号:accounts.extra.openai_apikey_responses_websockets_v2_enabled +// +// 兼容字段: +// - accounts.extra.responses_websockets_v2_enabled +// - accounts.extra.openai_ws_enabled(历史开关) +// +// 优先级: +// 1. 按账号类型读取分类型字段 +// 2. 
分类型字段缺失时,回退兼容字段 +func (a *Account) IsOpenAIResponsesWebSocketV2Enabled() bool { + if a == nil || !a.IsOpenAI() || a.Extra == nil { + return false + } + if a.IsOpenAIOAuth() { + if enabled, ok := a.Extra["openai_oauth_responses_websockets_v2_enabled"].(bool); ok { + return enabled + } + } + if a.IsOpenAIApiKey() { + if enabled, ok := a.Extra["openai_apikey_responses_websockets_v2_enabled"].(bool); ok { + return enabled + } + } + if enabled, ok := a.Extra["responses_websockets_v2_enabled"].(bool); ok { + return enabled + } + if enabled, ok := a.Extra["openai_ws_enabled"].(bool); ok { + return enabled + } + return false +} + +// IsOpenAIWSForceHTTPEnabled 返回账号级“强制 HTTP”开关。 +// 字段:accounts.extra.openai_ws_force_http。 +func (a *Account) IsOpenAIWSForceHTTPEnabled() bool { + if a == nil || !a.IsOpenAI() || a.Extra == nil { + return false + } + enabled, ok := a.Extra["openai_ws_force_http"].(bool) + return ok && enabled +} + +// IsOpenAIWSAllowStoreRecoveryEnabled 返回账号级 store 恢复开关。 +// 字段:accounts.extra.openai_ws_allow_store_recovery。 +func (a *Account) IsOpenAIWSAllowStoreRecoveryEnabled() bool { + if a == nil || !a.IsOpenAI() || a.Extra == nil { + return false + } + enabled, ok := a.Extra["openai_ws_allow_store_recovery"].(bool) + return ok && enabled +} + // IsOpenAIOAuthPassthroughEnabled 兼容旧接口,等价于 OAuth 账号的 IsOpenAIPassthroughEnabled。 func (a *Account) IsOpenAIOAuthPassthroughEnabled() bool { return a != nil && a.IsOpenAIOAuth() && a.IsOpenAIPassthroughEnabled() diff --git a/backend/internal/service/account_openai_passthrough_test.go b/backend/internal/service/account_openai_passthrough_test.go index 59f8cd8cc..15124c641 100644 --- a/backend/internal/service/account_openai_passthrough_test.go +++ b/backend/internal/service/account_openai_passthrough_test.go @@ -134,3 +134,102 @@ func TestAccount_IsCodexCLIOnlyEnabled(t *testing.T) { require.False(t, otherPlatform.IsCodexCLIOnlyEnabled()) }) } + +func TestAccount_IsOpenAIResponsesWebSocketV2Enabled(t *testing.T) { + 
t.Run("OAuth使用OAuth专用开关", func(t *testing.T) { + account := &Account{ + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "openai_oauth_responses_websockets_v2_enabled": true, + }, + } + require.True(t, account.IsOpenAIResponsesWebSocketV2Enabled()) + }) + + t.Run("API Key使用API Key专用开关", func(t *testing.T) { + account := &Account{ + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Extra: map[string]any{ + "openai_apikey_responses_websockets_v2_enabled": true, + }, + } + require.True(t, account.IsOpenAIResponsesWebSocketV2Enabled()) + }) + + t.Run("OAuth账号不会读取API Key专用开关", func(t *testing.T) { + account := &Account{ + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "openai_apikey_responses_websockets_v2_enabled": true, + }, + } + require.False(t, account.IsOpenAIResponsesWebSocketV2Enabled()) + }) + + t.Run("分类型新键优先于兼容键", func(t *testing.T) { + account := &Account{ + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "openai_oauth_responses_websockets_v2_enabled": false, + "responses_websockets_v2_enabled": true, + "openai_ws_enabled": true, + }, + } + require.False(t, account.IsOpenAIResponsesWebSocketV2Enabled()) + }) + + t.Run("分类型键缺失时回退兼容键", func(t *testing.T) { + account := &Account{ + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + require.True(t, account.IsOpenAIResponsesWebSocketV2Enabled()) + }) + + t.Run("非OpenAI账号默认关闭", func(t *testing.T) { + account := &Account{ + Platform: PlatformAnthropic, + Type: AccountTypeAPIKey, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + require.False(t, account.IsOpenAIResponsesWebSocketV2Enabled()) + }) +} + +func TestAccount_OpenAIWSExtraFlags(t *testing.T) { + account := &Account{ + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "openai_ws_force_http": true, + 
"openai_ws_allow_store_recovery": true, + }, + } + require.True(t, account.IsOpenAIWSForceHTTPEnabled()) + require.True(t, account.IsOpenAIWSAllowStoreRecoveryEnabled()) + + off := &Account{Platform: PlatformOpenAI, Type: AccountTypeOAuth, Extra: map[string]any{}} + require.False(t, off.IsOpenAIWSForceHTTPEnabled()) + require.False(t, off.IsOpenAIWSAllowStoreRecoveryEnabled()) + + var nilAccount *Account + require.False(t, nilAccount.IsOpenAIWSAllowStoreRecoveryEnabled()) + + nonOpenAI := &Account{ + Platform: PlatformAnthropic, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "openai_ws_allow_store_recovery": true, + }, + } + require.False(t, nonOpenAI.IsOpenAIWSAllowStoreRecoveryEnabled()) +} diff --git a/backend/internal/service/data_management_grpc.go b/backend/internal/service/data_management_grpc.go index 4aabdfec2..7ad4b5264 100644 --- a/backend/internal/service/data_management_grpc.go +++ b/backend/internal/service/data_management_grpc.go @@ -2,6 +2,7 @@ package service import ( "context" + "errors" "fmt" "net" "strings" @@ -12,6 +13,7 @@ import ( infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/metadata" grpcstatus "google.golang.org/grpc/status" @@ -519,19 +521,7 @@ func (s *DataManagementService) withClient(ctx context.Context, call func(contex } socketPath := s.SocketPath() - dialCtx, dialCancel := context.WithTimeout(ctx, s.dialTimeout) - defer dialCancel() - - conn, err := grpc.DialContext( - dialCtx, - "unix://"+socketPath, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithBlock(), - grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) { - dialer := net.Dialer{Timeout: s.dialTimeout} - return dialer.DialContext(ctx, "unix", socketPath) - }), - ) + conn, err := s.dialBackupAgent(ctx, socketPath) if err != nil { return 
ErrBackupAgentUnavailable.WithMetadata(map[string]string{"socket_path": socketPath}).WithCause(err) } @@ -880,19 +870,7 @@ func validateSourceProfileInput(sourceType, profileID, name string) error { func (s *DataManagementService) probeBackupHealth(ctx context.Context) (*DataManagementAgentInfo, error) { socketPath := s.SocketPath() - dialCtx, dialCancel := context.WithTimeout(ctx, s.dialTimeout) - defer dialCancel() - - conn, err := grpc.DialContext( - dialCtx, - "unix://"+socketPath, - grpc.WithTransportCredentials(insecure.NewCredentials()), - grpc.WithBlock(), - grpc.WithContextDialer(func(ctx context.Context, _ string) (net.Conn, error) { - dialer := net.Dialer{Timeout: s.dialTimeout} - return dialer.DialContext(ctx, "unix", socketPath) - }), - ) + conn, err := s.dialBackupAgent(ctx, socketPath) if err != nil { return nil, err } @@ -920,6 +898,47 @@ func (s *DataManagementService) probeBackupHealth(ctx context.Context) (*DataMan }, nil } +func (s *DataManagementService) dialBackupAgent(ctx context.Context, socketPath string) (*grpc.ClientConn, error) { + conn, err := grpc.NewClient( + "passthrough:///backup-agent", + grpc.WithTransportCredentials(insecure.NewCredentials()), + grpc.WithContextDialer(func(dialCtx context.Context, _ string) (net.Conn, error) { + dialer := net.Dialer{Timeout: s.dialTimeout} + return dialer.DialContext(dialCtx, "unix", socketPath) + }), + ) + if err != nil { + return nil, err + } + + timeout := s.dialTimeout + if timeout <= 0 { + return conn, nil + } + + waitCtx, waitCancel := context.WithTimeout(ctx, timeout) + defer waitCancel() + conn.Connect() + for { + state := conn.GetState() + if state == connectivity.Ready { + return conn, nil + } + if state == connectivity.Shutdown { + _ = conn.Close() + return nil, errors.New("backup agent grpc connection shutdown") + } + if conn.WaitForStateChange(waitCtx, state) { + continue + } + _ = conn.Close() + if waitErr := waitCtx.Err(); waitErr != nil { + return nil, waitErr + } + return nil, 
context.DeadlineExceeded + } +} + func requestIDFromContext(ctx context.Context) string { if ctx == nil { return "" diff --git a/backend/internal/service/data_management_grpc_test.go b/backend/internal/service/data_management_grpc_test.go index 5461f09ac..6de435e7b 100644 --- a/backend/internal/service/data_management_grpc_test.go +++ b/backend/internal/service/data_management_grpc_test.go @@ -1,12 +1,19 @@ package service import ( + "context" "errors" + "fmt" + "path/filepath" "testing" + "time" + backupv1 "github.com/Wei-Shaw/sub2api/internal/backup/proto/backup/v1" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" grpcstatus "google.golang.org/grpc/status" ) @@ -126,3 +133,59 @@ func TestValidateDataManagementConfig(t *testing.T) { s3EnabledMissingBucket.S3.Bucket = "" require.Error(t, validateDataManagementConfig(s3EnabledMissingBucket)) } + +func TestDataManagementService_DialBackupAgent_TimeoutDisabled(t *testing.T) { + t.Parallel() + + socketPath := filepath.Join("/tmp", fmt.Sprintf("s2dm0-%d.sock", time.Now().UnixNano())) + startTestBackupHealthServer(t, socketPath) + + svc := &DataManagementService{ + socketPath: socketPath, + dialTimeout: 0, + } + + conn, err := svc.dialBackupAgent(context.Background(), socketPath) + require.NoError(t, err) + require.NotNil(t, conn) + require.NoError(t, conn.Close()) +} + +func TestDataManagementService_DialBackupAgent_TimeoutExceeded(t *testing.T) { + t.Parallel() + + socketPath := filepath.Join(t.TempDir(), "missing.sock") + svc := &DataManagementService{ + socketPath: socketPath, + dialTimeout: 30 * time.Millisecond, + } + + conn, err := svc.dialBackupAgent(context.Background(), socketPath) + require.Nil(t, conn) + require.Error(t, err) + require.ErrorIs(t, err, context.DeadlineExceeded) +} + +func TestDataManagementService_WithClient_PassesRequestID(t 
*testing.T) { + t.Parallel() + + socketPath := filepath.Join("/tmp", fmt.Sprintf("s2dm1-%d.sock", time.Now().UnixNano())) + startTestBackupHealthServer(t, socketPath) + + svc := &DataManagementService{ + socketPath: socketPath, + dialTimeout: 200 * time.Millisecond, + } + + ctx := context.WithValue(context.Background(), ctxkey.RequestID, "req-data-management-1") + called := false + err := svc.withClient(ctx, func(callCtx context.Context, _ backupv1.BackupServiceClient) error { + called = true + md, ok := metadata.FromOutgoingContext(callCtx) + require.True(t, ok) + require.Equal(t, []string{"req-data-management-1"}, md.Get("x-request-id")) + return nil + }) + require.NoError(t, err) + require.True(t, called) +} diff --git a/backend/internal/service/openai_account_scheduler.go b/backend/internal/service/openai_account_scheduler.go new file mode 100644 index 000000000..25451e809 --- /dev/null +++ b/backend/internal/service/openai_account_scheduler.go @@ -0,0 +1,665 @@ +package service + +import ( + "context" + "errors" + "math" + "sort" + "strings" + "sync" + "sync/atomic" + "time" +) + +const ( + openAIAccountScheduleLayerPreviousResponse = "previous_response_id" + openAIAccountScheduleLayerSessionSticky = "session_hash" + openAIAccountScheduleLayerLoadBalance = "load_balance" +) + +type OpenAIAccountScheduleRequest struct { + GroupID *int64 + SessionHash string + PreviousResponseID string + RequestedModel string + ExcludedIDs map[int64]struct{} +} + +type OpenAIAccountScheduleDecision struct { + Layer string + StickyPreviousHit bool + StickySessionHit bool + CandidateCount int + TopK int + LatencyMs int64 + LoadSkew float64 + SelectedAccountID int64 + SelectedAccountType string +} + +type OpenAIAccountSchedulerMetricsSnapshot struct { + SelectTotal int64 + StickyPreviousHitTotal int64 + StickySessionHitTotal int64 + LoadBalanceSelectTotal int64 + AccountSwitchTotal int64 + SchedulerLatencyMsTotal int64 + SchedulerLatencyMsAvg float64 + StickyHitRatio float64 + 
AccountSwitchRate float64 + LoadSkewAvg float64 + RuntimeStatsAccountCount int +} + +type OpenAIAccountScheduler interface { + Select(ctx context.Context, req OpenAIAccountScheduleRequest) (*AccountSelectionResult, OpenAIAccountScheduleDecision, error) + ReportResult(accountID int64, success bool, firstTokenMs *int) + ReportSwitch() + SnapshotMetrics() OpenAIAccountSchedulerMetricsSnapshot +} + +type openAIAccountSchedulerMetrics struct { + selectTotal atomic.Int64 + stickyPreviousHitTotal atomic.Int64 + stickySessionHitTotal atomic.Int64 + loadBalanceSelectTotal atomic.Int64 + accountSwitchTotal atomic.Int64 + latencyMsTotal atomic.Int64 + loadSkewMilliTotal atomic.Int64 +} + +func (m *openAIAccountSchedulerMetrics) recordSelect(decision OpenAIAccountScheduleDecision) { + if m == nil { + return + } + m.selectTotal.Add(1) + m.latencyMsTotal.Add(decision.LatencyMs) + m.loadSkewMilliTotal.Add(int64(math.Round(decision.LoadSkew * 1000))) + if decision.StickyPreviousHit { + m.stickyPreviousHitTotal.Add(1) + } + if decision.StickySessionHit { + m.stickySessionHitTotal.Add(1) + } + if decision.Layer == openAIAccountScheduleLayerLoadBalance { + m.loadBalanceSelectTotal.Add(1) + } +} + +func (m *openAIAccountSchedulerMetrics) recordSwitch() { + if m == nil { + return + } + m.accountSwitchTotal.Add(1) +} + +type openAIAccountRuntimeStats struct { + mu sync.RWMutex + accounts map[int64]*openAIAccountRuntimeStat +} + +type openAIAccountRuntimeStat struct { + errorRateEWMA float64 + ttftEWMA float64 + hasTTFT bool +} + +func newOpenAIAccountRuntimeStats() *openAIAccountRuntimeStats { + return &openAIAccountRuntimeStats{ + accounts: make(map[int64]*openAIAccountRuntimeStat, 64), + } +} + +func (s *openAIAccountRuntimeStats) report(accountID int64, success bool, firstTokenMs *int) { + if s == nil || accountID <= 0 { + return + } + const alpha = 0.2 + s.mu.Lock() + defer s.mu.Unlock() + + stat, ok := s.accounts[accountID] + if !ok { + stat = &openAIAccountRuntimeStat{} + 
s.accounts[accountID] = stat + } + + errorSample := 1.0 + if success { + errorSample = 0.0 + } + stat.errorRateEWMA = alpha*errorSample + (1-alpha)*stat.errorRateEWMA + + if firstTokenMs != nil && *firstTokenMs > 0 { + ttft := float64(*firstTokenMs) + if !stat.hasTTFT { + stat.ttftEWMA = ttft + stat.hasTTFT = true + } else { + stat.ttftEWMA = alpha*ttft + (1-alpha)*stat.ttftEWMA + } + } +} + +func (s *openAIAccountRuntimeStats) snapshot(accountID int64) (errorRate float64, ttft float64, hasTTFT bool) { + if s == nil || accountID <= 0 { + return 0, 0, false + } + s.mu.RLock() + defer s.mu.RUnlock() + stat, ok := s.accounts[accountID] + if !ok || stat == nil { + return 0, 0, false + } + return clamp01(stat.errorRateEWMA), stat.ttftEWMA, stat.hasTTFT +} + +func (s *openAIAccountRuntimeStats) size() int { + if s == nil { + return 0 + } + s.mu.RLock() + defer s.mu.RUnlock() + return len(s.accounts) +} + +type defaultOpenAIAccountScheduler struct { + service *OpenAIGatewayService + metrics openAIAccountSchedulerMetrics + stats *openAIAccountRuntimeStats +} + +func newDefaultOpenAIAccountScheduler(service *OpenAIGatewayService, stats *openAIAccountRuntimeStats) OpenAIAccountScheduler { + if stats == nil { + stats = newOpenAIAccountRuntimeStats() + } + return &defaultOpenAIAccountScheduler{ + service: service, + stats: stats, + } +} + +func (s *defaultOpenAIAccountScheduler) Select( + ctx context.Context, + req OpenAIAccountScheduleRequest, +) (*AccountSelectionResult, OpenAIAccountScheduleDecision, error) { + decision := OpenAIAccountScheduleDecision{} + start := time.Now() + defer func() { + decision.LatencyMs = time.Since(start).Milliseconds() + s.metrics.recordSelect(decision) + }() + + previousResponseID := strings.TrimSpace(req.PreviousResponseID) + if previousResponseID != "" { + selection, err := s.service.SelectAccountByPreviousResponseID( + ctx, + req.GroupID, + previousResponseID, + req.RequestedModel, + req.ExcludedIDs, + ) + if err != nil { + return nil, 
decision, err + } + if selection != nil && selection.Account != nil { + decision.Layer = openAIAccountScheduleLayerPreviousResponse + decision.StickyPreviousHit = true + decision.SelectedAccountID = selection.Account.ID + decision.SelectedAccountType = selection.Account.Type + if req.SessionHash != "" { + _ = s.service.BindStickySession(ctx, req.GroupID, req.SessionHash, selection.Account.ID) + } + return selection, decision, nil + } + } + + selection, err := s.selectBySessionHash(ctx, req) + if err != nil { + return nil, decision, err + } + if selection != nil && selection.Account != nil { + decision.Layer = openAIAccountScheduleLayerSessionSticky + decision.StickySessionHit = true + decision.SelectedAccountID = selection.Account.ID + decision.SelectedAccountType = selection.Account.Type + return selection, decision, nil + } + + selection, candidateCount, topK, loadSkew, err := s.selectByLoadBalance(ctx, req) + decision.Layer = openAIAccountScheduleLayerLoadBalance + decision.CandidateCount = candidateCount + decision.TopK = topK + decision.LoadSkew = loadSkew + if err != nil { + return nil, decision, err + } + if selection != nil && selection.Account != nil { + decision.SelectedAccountID = selection.Account.ID + decision.SelectedAccountType = selection.Account.Type + } + return selection, decision, nil +} + +func (s *defaultOpenAIAccountScheduler) selectBySessionHash( + ctx context.Context, + req OpenAIAccountScheduleRequest, +) (*AccountSelectionResult, error) { + sessionHash := strings.TrimSpace(req.SessionHash) + if sessionHash == "" || s == nil || s.service == nil || s.service.cache == nil { + return nil, nil + } + + cacheKey := "openai:" + sessionHash + accountID, err := s.service.cache.GetSessionAccountID(ctx, derefGroupID(req.GroupID), cacheKey) + if err != nil || accountID <= 0 { + return nil, nil + } + if req.ExcludedIDs != nil { + if _, excluded := req.ExcludedIDs[accountID]; excluded { + return nil, nil + } + } + + account, err := 
s.service.getSchedulableAccount(ctx, accountID) + if err != nil || account == nil { + _ = s.service.cache.DeleteSessionAccountID(ctx, derefGroupID(req.GroupID), cacheKey) + return nil, nil + } + if shouldClearStickySession(account, req.RequestedModel) || !account.IsOpenAI() { + _ = s.service.cache.DeleteSessionAccountID(ctx, derefGroupID(req.GroupID), cacheKey) + return nil, nil + } + if req.RequestedModel != "" && !account.IsModelSupported(req.RequestedModel) { + return nil, nil + } + + result, acquireErr := s.service.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) + if acquireErr == nil && result.Acquired { + _ = s.service.cache.RefreshSessionTTL( + ctx, + derefGroupID(req.GroupID), + cacheKey, + s.service.openAIWSSessionStickyTTL(), + ) + return &AccountSelectionResult{ + Account: account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + + cfg := s.service.schedulingConfig() + if s.service.concurrencyService != nil { + waitingCount, _ := s.service.concurrencyService.GetAccountWaitingCount(ctx, accountID) + if waitingCount < cfg.StickySessionMaxWaiting { + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: accountID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } + } + return nil, nil +} + +type openAIAccountCandidateScore struct { + account *Account + loadInfo *AccountLoadInfo + score float64 + errorRate float64 + ttft float64 + hasTTFT bool +} + +func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( + ctx context.Context, + req OpenAIAccountScheduleRequest, +) (*AccountSelectionResult, int, int, float64, error) { + accounts, err := s.service.listSchedulableAccounts(ctx, req.GroupID) + if err != nil { + return nil, 0, 0, 0, err + } + if len(accounts) == 0 { + return nil, 0, 0, 0, errors.New("no available OpenAI accounts") + } + + filtered := make([]*Account, 0, len(accounts)) + for i := 
range accounts { + account := &accounts[i] + if req.ExcludedIDs != nil { + if _, excluded := req.ExcludedIDs[account.ID]; excluded { + continue + } + } + if !account.IsSchedulable() || !account.IsOpenAI() { + continue + } + if req.RequestedModel != "" && !account.IsModelSupported(req.RequestedModel) { + continue + } + filtered = append(filtered, account) + } + if len(filtered) == 0 { + return nil, 0, 0, 0, errors.New("no available OpenAI accounts") + } + + loadMap := map[int64]*AccountLoadInfo{} + if s.service.concurrencyService != nil { + loadReq := make([]AccountWithConcurrency, 0, len(filtered)) + for _, account := range filtered { + loadReq = append(loadReq, AccountWithConcurrency{ + ID: account.ID, + MaxConcurrency: account.Concurrency, + }) + } + if batchLoad, loadErr := s.service.concurrencyService.GetAccountsLoadBatch(ctx, loadReq); loadErr == nil { + loadMap = batchLoad + } + } + + minPriority, maxPriority := filtered[0].Priority, filtered[0].Priority + maxWaiting := 1 + loadRates := make([]float64, 0, len(filtered)) + ttftSamples := make([]float64, 0, len(filtered)) + candidates := make([]openAIAccountCandidateScore, 0, len(filtered)) + for _, account := range filtered { + loadInfo := loadMap[account.ID] + if loadInfo == nil { + loadInfo = &AccountLoadInfo{AccountID: account.ID} + } + if account.Priority < minPriority { + minPriority = account.Priority + } + if account.Priority > maxPriority { + maxPriority = account.Priority + } + if loadInfo.WaitingCount > maxWaiting { + maxWaiting = loadInfo.WaitingCount + } + errorRate, ttft, hasTTFT := s.stats.snapshot(account.ID) + if hasTTFT && ttft > 0 { + ttftSamples = append(ttftSamples, ttft) + } + loadRates = append(loadRates, float64(loadInfo.LoadRate)) + candidates = append(candidates, openAIAccountCandidateScore{ + account: account, + loadInfo: loadInfo, + errorRate: errorRate, + ttft: ttft, + hasTTFT: hasTTFT, + }) + } + + minTTFT, maxTTFT := 0.0, 0.0 + if len(ttftSamples) > 0 { + minTTFT, maxTTFT = 
ttftSamples[0], ttftSamples[0] + for _, sample := range ttftSamples[1:] { + if sample < minTTFT { + minTTFT = sample + } + if sample > maxTTFT { + maxTTFT = sample + } + } + } + + weights := s.service.openAIWSSchedulerWeights() + for i := range candidates { + item := &candidates[i] + priorityFactor := 1.0 + if maxPriority > minPriority { + priorityFactor = 1 - float64(item.account.Priority-minPriority)/float64(maxPriority-minPriority) + } + loadFactor := 1 - clamp01(float64(item.loadInfo.LoadRate)/100.0) + queueFactor := 1 - clamp01(float64(item.loadInfo.WaitingCount)/float64(maxWaiting)) + errorFactor := 1 - clamp01(item.errorRate) + ttftFactor := 0.5 + if item.hasTTFT && maxTTFT > minTTFT { + ttftFactor = 1 - clamp01((item.ttft-minTTFT)/(maxTTFT-minTTFT)) + } + + item.score = weights.Priority*priorityFactor + + weights.Load*loadFactor + + weights.Queue*queueFactor + + weights.ErrorRate*errorFactor + + weights.TTFT*ttftFactor + } + + sort.SliceStable(candidates, func(i, j int) bool { + left := candidates[i] + right := candidates[j] + if left.score != right.score { + return left.score > right.score + } + if left.account.Priority != right.account.Priority { + return left.account.Priority < right.account.Priority + } + if left.loadInfo.LoadRate != right.loadInfo.LoadRate { + return left.loadInfo.LoadRate < right.loadInfo.LoadRate + } + if left.loadInfo.WaitingCount != right.loadInfo.WaitingCount { + return left.loadInfo.WaitingCount < right.loadInfo.WaitingCount + } + return left.account.ID < right.account.ID + }) + + topK := s.service.openAIWSLBTopK() + if topK > len(candidates) { + topK = len(candidates) + } + if topK <= 0 { + topK = 1 + } + + for i := 0; i < topK; i++ { + candidate := candidates[i] + result, acquireErr := s.service.tryAcquireAccountSlot(ctx, candidate.account.ID, candidate.account.Concurrency) + if acquireErr != nil { + return nil, len(candidates), topK, calcLoadSkew(loadRates), acquireErr + } + if result != nil && result.Acquired { + if 
req.SessionHash != "" { + _ = s.service.BindStickySession(ctx, req.GroupID, req.SessionHash, candidate.account.ID) + } + return &AccountSelectionResult{ + Account: candidate.account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, len(candidates), topK, calcLoadSkew(loadRates), nil + } + } + + cfg := s.service.schedulingConfig() + candidate := candidates[0] + return &AccountSelectionResult{ + Account: candidate.account, + WaitPlan: &AccountWaitPlan{ + AccountID: candidate.account.ID, + MaxConcurrency: candidate.account.Concurrency, + Timeout: cfg.FallbackWaitTimeout, + MaxWaiting: cfg.FallbackMaxWaiting, + }, + }, len(candidates), topK, calcLoadSkew(loadRates), nil +} + +func (s *defaultOpenAIAccountScheduler) ReportResult(accountID int64, success bool, firstTokenMs *int) { + if s == nil || s.stats == nil { + return + } + s.stats.report(accountID, success, firstTokenMs) +} + +func (s *defaultOpenAIAccountScheduler) ReportSwitch() { + if s == nil { + return + } + s.metrics.recordSwitch() +} + +func (s *defaultOpenAIAccountScheduler) SnapshotMetrics() OpenAIAccountSchedulerMetricsSnapshot { + if s == nil { + return OpenAIAccountSchedulerMetricsSnapshot{} + } + + selectTotal := s.metrics.selectTotal.Load() + prevHit := s.metrics.stickyPreviousHitTotal.Load() + sessionHit := s.metrics.stickySessionHitTotal.Load() + switchTotal := s.metrics.accountSwitchTotal.Load() + latencyTotal := s.metrics.latencyMsTotal.Load() + loadSkewTotal := s.metrics.loadSkewMilliTotal.Load() + + snapshot := OpenAIAccountSchedulerMetricsSnapshot{ + SelectTotal: selectTotal, + StickyPreviousHitTotal: prevHit, + StickySessionHitTotal: sessionHit, + LoadBalanceSelectTotal: s.metrics.loadBalanceSelectTotal.Load(), + AccountSwitchTotal: switchTotal, + SchedulerLatencyMsTotal: latencyTotal, + RuntimeStatsAccountCount: s.stats.size(), + } + if selectTotal > 0 { + snapshot.SchedulerLatencyMsAvg = float64(latencyTotal) / float64(selectTotal) + snapshot.StickyHitRatio = 
float64(prevHit+sessionHit) / float64(selectTotal) + snapshot.AccountSwitchRate = float64(switchTotal) / float64(selectTotal) + snapshot.LoadSkewAvg = float64(loadSkewTotal) / 1000 / float64(selectTotal) + } + return snapshot +} + +func (s *OpenAIGatewayService) getOpenAIAccountScheduler() OpenAIAccountScheduler { + if s == nil { + return nil + } + s.openaiWSInitMu.Lock() + defer s.openaiWSInitMu.Unlock() + if s.openaiAccountStats == nil { + s.openaiAccountStats = newOpenAIAccountRuntimeStats() + } + if s.openaiScheduler == nil { + s.openaiScheduler = newDefaultOpenAIAccountScheduler(s, s.openaiAccountStats) + } + return s.openaiScheduler +} + +func (s *OpenAIGatewayService) SelectAccountWithScheduler( + ctx context.Context, + groupID *int64, + previousResponseID string, + sessionHash string, + requestedModel string, + excludedIDs map[int64]struct{}, +) (*AccountSelectionResult, OpenAIAccountScheduleDecision, error) { + decision := OpenAIAccountScheduleDecision{} + scheduler := s.getOpenAIAccountScheduler() + if scheduler == nil { + selection, err := s.SelectAccountWithLoadAwareness(ctx, groupID, sessionHash, requestedModel, excludedIDs) + decision.Layer = openAIAccountScheduleLayerLoadBalance + return selection, decision, err + } + return scheduler.Select(ctx, OpenAIAccountScheduleRequest{ + GroupID: groupID, + SessionHash: sessionHash, + PreviousResponseID: previousResponseID, + RequestedModel: requestedModel, + ExcludedIDs: excludedIDs, + }) +} + +func (s *OpenAIGatewayService) ReportOpenAIAccountScheduleResult(accountID int64, success bool, firstTokenMs *int) { + scheduler := s.getOpenAIAccountScheduler() + if scheduler == nil { + return + } + scheduler.ReportResult(accountID, success, firstTokenMs) +} + +func (s *OpenAIGatewayService) RecordOpenAIAccountSwitch() { + scheduler := s.getOpenAIAccountScheduler() + if scheduler == nil { + return + } + scheduler.ReportSwitch() +} + +func (s *OpenAIGatewayService) SnapshotOpenAIAccountSchedulerMetrics() 
OpenAIAccountSchedulerMetricsSnapshot { + scheduler := s.getOpenAIAccountScheduler() + if scheduler == nil { + return OpenAIAccountSchedulerMetricsSnapshot{} + } + return scheduler.SnapshotMetrics() +} + +func (s *OpenAIGatewayService) openAIWSSessionStickyTTL() time.Duration { + if s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.StickySessionTTLSeconds > 0 { + return time.Duration(s.cfg.Gateway.OpenAIWS.StickySessionTTLSeconds) * time.Second + } + return openaiStickySessionTTL +} + +func (s *OpenAIGatewayService) openAIWSLBTopK() int { + if s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.LBTopK > 0 { + return s.cfg.Gateway.OpenAIWS.LBTopK + } + return 3 +} + +func (s *OpenAIGatewayService) openAIWSSchedulerWeights() GatewayOpenAIWSSchedulerScoreWeightsView { + if s != nil && s.cfg != nil { + return GatewayOpenAIWSSchedulerScoreWeightsView{ + Priority: s.cfg.Gateway.OpenAIWS.SchedulerScoreWeights.Priority, + Load: s.cfg.Gateway.OpenAIWS.SchedulerScoreWeights.Load, + Queue: s.cfg.Gateway.OpenAIWS.SchedulerScoreWeights.Queue, + ErrorRate: s.cfg.Gateway.OpenAIWS.SchedulerScoreWeights.ErrorRate, + TTFT: s.cfg.Gateway.OpenAIWS.SchedulerScoreWeights.TTFT, + } + } + return GatewayOpenAIWSSchedulerScoreWeightsView{ + Priority: 1.0, + Load: 1.0, + Queue: 0.7, + ErrorRate: 0.8, + TTFT: 0.5, + } +} + +type GatewayOpenAIWSSchedulerScoreWeightsView struct { + Priority float64 + Load float64 + Queue float64 + ErrorRate float64 + TTFT float64 +} + +func clamp01(value float64) float64 { + switch { + case value < 0: + return 0 + case value > 1: + return 1 + default: + return value + } +} + +func calcLoadSkew(loadRates []float64) float64 { + if len(loadRates) <= 1 { + return 0 + } + sum := 0.0 + for _, value := range loadRates { + sum += value + } + mean := sum / float64(len(loadRates)) + variance := 0.0 + for _, value := range loadRates { + diff := value - mean + variance += diff * diff + } + variance /= float64(len(loadRates)) + return math.Sqrt(variance) +} diff --git 
a/backend/internal/service/openai_account_scheduler_test.go b/backend/internal/service/openai_account_scheduler_test.go new file mode 100644 index 000000000..13b74c1bd --- /dev/null +++ b/backend/internal/service/openai_account_scheduler_test.go @@ -0,0 +1,275 @@ +package service + +import ( + "context" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +func TestOpenAIGatewayService_SelectAccountWithScheduler_PreviousResponseSticky(t *testing.T) { + ctx := context.Background() + groupID := int64(9) + account := Account{ + ID: 1001, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 2, + Extra: map[string]any{ + "openai_apikey_responses_websockets_v2_enabled": true, + }, + } + cache := &stubGatewayCache{} + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.StickySessionTTLSeconds = 1800 + cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = 3600 + + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{account}}, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(stubConcurrencyCache{}), + } + + store := svc.getOpenAIWSStateStore() + require.NoError(t, store.BindResponseAccount(ctx, groupID, "resp_prev_001", account.ID, time.Hour)) + + selection, decision, err := svc.SelectAccountWithScheduler( + ctx, + &groupID, + "resp_prev_001", + "session_hash_001", + "gpt-5.1", + nil, + ) + require.NoError(t, err) + require.NotNil(t, selection) + require.NotNil(t, selection.Account) + require.Equal(t, account.ID, selection.Account.ID) + require.Equal(t, openAIAccountScheduleLayerPreviousResponse, decision.Layer) + require.True(t, decision.StickyPreviousHit) + require.Equal(t, account.ID, 
cache.sessionBindings["openai:session_hash_001"]) + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + +func TestOpenAIGatewayService_SelectAccountWithScheduler_SessionSticky(t *testing.T) { + ctx := context.Background() + groupID := int64(10) + account := Account{ + ID: 2001, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + } + cache := &stubGatewayCache{ + sessionBindings: map[string]int64{ + "openai:session_hash_abc": account.ID, + }, + } + + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{account}}, + cache: cache, + cfg: &config.Config{}, + concurrencyService: NewConcurrencyService(stubConcurrencyCache{}), + } + + selection, decision, err := svc.SelectAccountWithScheduler( + ctx, + &groupID, + "", + "session_hash_abc", + "gpt-5.1", + nil, + ) + require.NoError(t, err) + require.NotNil(t, selection) + require.NotNil(t, selection.Account) + require.Equal(t, account.ID, selection.Account.ID) + require.Equal(t, openAIAccountScheduleLayerSessionSticky, decision.Layer) + require.True(t, decision.StickySessionHit) + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + +func TestOpenAIGatewayService_SelectAccountWithScheduler_SessionSticky_ForceHTTP(t *testing.T) { + ctx := context.Background() + groupID := int64(1010) + account := Account{ + ID: 2101, + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Extra: map[string]any{ + "openai_ws_force_http": true, + }, + } + cache := &stubGatewayCache{ + sessionBindings: map[string]int64{ + "openai:session_hash_force_http": account.ID, + }, + } + + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{account}}, + cache: cache, + cfg: &config.Config{}, + concurrencyService: NewConcurrencyService(stubConcurrencyCache{}), + } + + selection, decision, err := svc.SelectAccountWithScheduler( 
+ ctx, + &groupID, + "", + "session_hash_force_http", + "gpt-5.1", + nil, + ) + require.NoError(t, err) + require.NotNil(t, selection) + require.NotNil(t, selection.Account) + require.Equal(t, account.ID, selection.Account.ID) + require.Equal(t, openAIAccountScheduleLayerSessionSticky, decision.Layer) + require.True(t, decision.StickySessionHit) + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + +func TestOpenAIGatewayService_SelectAccountWithScheduler_LoadBalanceTopKFallback(t *testing.T) { + ctx := context.Background() + groupID := int64(11) + accounts := []Account{ + { + ID: 3001, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 0, + }, + { + ID: 3002, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 0, + }, + { + ID: 3003, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Priority: 0, + }, + } + + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.LBTopK = 2 + cfg.Gateway.OpenAIWS.SchedulerScoreWeights.Priority = 0.4 + cfg.Gateway.OpenAIWS.SchedulerScoreWeights.Load = 1.0 + cfg.Gateway.OpenAIWS.SchedulerScoreWeights.Queue = 1.0 + cfg.Gateway.OpenAIWS.SchedulerScoreWeights.ErrorRate = 0.2 + cfg.Gateway.OpenAIWS.SchedulerScoreWeights.TTFT = 0.1 + + concurrencyCache := stubConcurrencyCache{ + loadMap: map[int64]*AccountLoadInfo{ + 3001: {AccountID: 3001, LoadRate: 95, WaitingCount: 8}, + 3002: {AccountID: 3002, LoadRate: 20, WaitingCount: 1}, + 3003: {AccountID: 3003, LoadRate: 10, WaitingCount: 0}, + }, + acquireResults: map[int64]bool{ + 3003: false, // top1 失败,必须回退到 top-K 的下一候选 + 3002: true, + }, + } + + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: accounts}, + cache: &stubGatewayCache{}, + cfg: cfg, + concurrencyService: NewConcurrencyService(concurrencyCache), + } + + selection, 
decision, err := svc.SelectAccountWithScheduler( + ctx, + &groupID, + "", + "", + "gpt-5.1", + nil, + ) + require.NoError(t, err) + require.NotNil(t, selection) + require.NotNil(t, selection.Account) + require.Equal(t, int64(3002), selection.Account.ID) + require.Equal(t, openAIAccountScheduleLayerLoadBalance, decision.Layer) + require.Equal(t, 3, decision.CandidateCount) + require.Equal(t, 2, decision.TopK) + require.Greater(t, decision.LoadSkew, 0.0) + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + +func TestOpenAIGatewayService_OpenAIAccountSchedulerMetrics(t *testing.T) { + ctx := context.Background() + groupID := int64(12) + account := Account{ + ID: 4001, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + } + cache := &stubGatewayCache{ + sessionBindings: map[string]int64{ + "openai:session_hash_metrics": account.ID, + }, + } + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{account}}, + cache: cache, + cfg: &config.Config{}, + concurrencyService: NewConcurrencyService(stubConcurrencyCache{}), + } + + selection, _, err := svc.SelectAccountWithScheduler(ctx, &groupID, "", "session_hash_metrics", "gpt-5.1", nil) + require.NoError(t, err) + require.NotNil(t, selection) + svc.ReportOpenAIAccountScheduleResult(account.ID, true, intPtrForTest(120)) + svc.RecordOpenAIAccountSwitch() + + snapshot := svc.SnapshotOpenAIAccountSchedulerMetrics() + require.GreaterOrEqual(t, snapshot.SelectTotal, int64(1)) + require.GreaterOrEqual(t, snapshot.StickySessionHitTotal, int64(1)) + require.GreaterOrEqual(t, snapshot.AccountSwitchTotal, int64(1)) + require.GreaterOrEqual(t, snapshot.SchedulerLatencyMsAvg, float64(0)) + require.GreaterOrEqual(t, snapshot.StickyHitRatio, 0.0) + require.GreaterOrEqual(t, snapshot.RuntimeStatsAccountCount, 1) +} + +func intPtrForTest(v int) *int { + return &v +} diff --git 
a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index f26ce03f0..da818de49 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -14,6 +14,7 @@ import ( "sort" "strconv" "strings" + "sync" "sync/atomic" "time" @@ -44,25 +45,29 @@ const ( // OpenAI allowed headers whitelist (for non-passthrough). var openaiAllowedHeaders = map[string]bool{ - "accept-language": true, - "content-type": true, - "conversation_id": true, - "user-agent": true, - "originator": true, - "session_id": true, + "accept-language": true, + "content-type": true, + "conversation_id": true, + "user-agent": true, + "originator": true, + "session_id": true, + "x-codex-turn-state": true, + "x-codex-turn-metadata": true, } // OpenAI passthrough allowed headers whitelist. // 透传模式下仅放行这些低风险请求头,避免将非标准/环境噪声头传给上游触发风控。 var openaiPassthroughAllowedHeaders = map[string]bool{ - "accept": true, - "accept-language": true, - "content-type": true, - "conversation_id": true, - "openai-beta": true, - "user-agent": true, - "originator": true, - "session_id": true, + "accept": true, + "accept-language": true, + "content-type": true, + "conversation_id": true, + "openai-beta": true, + "user-agent": true, + "originator": true, + "session_id": true, + "x-codex-turn-state": true, + "x-codex-turn-metadata": true, } // codex_cli_only 拒绝时记录的请求头白名单(仅用于诊断日志,不参与上游透传) @@ -218,6 +223,16 @@ type OpenAIGatewayService struct { deferredService *DeferredService openAITokenProvider *OpenAITokenProvider toolCorrector *CodexToolCorrector + openaiWSResolver OpenAIWSProtocolResolver + + openaiWSInitMu sync.Mutex + openaiWSPool *openAIWSConnPool + openaiWSStateStore OpenAIWSStateStore + openaiScheduler OpenAIAccountScheduler + openaiAccountStats *openAIAccountRuntimeStats + + openaiWSFallbackMu sync.Mutex + openaiWSFallbackUntil map[int64]time.Time } // NewOpenAIGatewayService creates a new OpenAIGatewayService @@ 
-254,6 +269,7 @@ func NewOpenAIGatewayService( deferredService: deferredService, openAITokenProvider: openAITokenProvider, toolCorrector: NewCodexToolCorrector(), + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), } } @@ -268,6 +284,17 @@ func (s *OpenAIGatewayService) getCodexClientRestrictionDetector() CodexClientRe return NewOpenAICodexClientRestrictionDetector(cfg) } +func (s *OpenAIGatewayService) getOpenAIWSProtocolResolver() OpenAIWSProtocolResolver { + if s != nil && s.openaiWSResolver != nil { + return s.openaiWSResolver + } + var cfg *config.Config + if s != nil { + cfg = s.cfg + } + return NewOpenAIWSProtocolResolver(cfg) +} + func (s *OpenAIGatewayService) detectCodexClientRestriction(c *gin.Context, account *Account) CodexClientRestrictionDetectionResult { return s.getCodexClientRestrictionDetector().Detect(c, account) } @@ -503,7 +530,11 @@ func (s *OpenAIGatewayService) BindStickySession(ctx context.Context, groupID *i if sessionHash == "" || accountID <= 0 { return nil } - return s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash, accountID, openaiStickySessionTTL) + ttl := openaiStickySessionTTL + if s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.StickySessionTTLSeconds > 0 { + ttl = time.Duration(s.cfg.Gateway.OpenAIWS.StickySessionTTLSeconds) * time.Second + } + return s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash, accountID, ttl) } // SelectAccount selects an OpenAI account with sticky session support @@ -1010,6 +1041,23 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco originalModel := reqModel isCodexCLI := openai.IsCodexCLIRequest(c.GetHeader("User-Agent")) || (s.cfg != nil && s.cfg.Gateway.ForceCodexCLI) + wsDecision := s.getOpenAIWSProtocolResolver().Resolve(account) + if c != nil { + c.Set("openai_ws_transport_decision", string(wsDecision.Transport)) + c.Set("openai_ws_transport_reason", wsDecision.Reason) + } + // 当前仅支持 WSv2;WSv1 
命中时直接返回错误,避免出现“配置可开但行为不确定”。 + if wsDecision.Transport == OpenAIUpstreamTransportResponsesWebsocket { + if c != nil { + c.JSON(http.StatusBadRequest, gin.H{ + "error": gin.H{ + "type": "invalid_request_error", + "message": "OpenAI WSv1 is temporarily unsupported. Please enable responses_websockets_v2.", + }, + }) + } + return nil, errors.New("openai ws v1 is temporarily unsupported; use ws v2") + } passthroughEnabled := account.IsOpenAIPassthroughEnabled() if passthroughEnabled { // 透传分支只需要轻量提取字段,避免热路径全量 Unmarshal。 @@ -1126,7 +1174,8 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco } // Remove unsupported fields (not supported by upstream OpenAI API) - for _, unsupportedField := range []string{"prompt_cache_retention", "safety_identifier", "previous_response_id"} { + unsupportedFields := []string{"prompt_cache_retention", "safety_identifier"} + for _, unsupportedField := range unsupportedFields { if _, has := reqBody[unsupportedField]; has { delete(reqBody, unsupportedField) bodyModified = true @@ -1134,6 +1183,15 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco } } + // 仅在 WSv2 模式保留 previous_response_id,其他模式(HTTP/WSv1)统一过滤。 + // 注意:该规则同样适用于 Codex CLI 请求,避免 WSv1 向上游透传不支持字段。 + if wsDecision.Transport != OpenAIUpstreamTransportResponsesWebsocketV2 { + if _, has := reqBody["previous_response_id"]; has { + delete(reqBody, "previous_response_id") + bodyModified = true + } + } + // Re-serialize body only if modified if bodyModified { var err error @@ -1149,6 +1207,60 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco return nil, err } + // Capture upstream request body for ops retry of this attempt. 
+ setOpsUpstreamRequestBody(c, body) + + // 命中 WS 时优先走 WebSocket Mode;仅在“未写下游”且可恢复错误时回退 HTTP。 + if wsDecision.Transport == OpenAIUpstreamTransportResponsesWebsocketV2 { + if s.isOpenAIWSFallbackCooling(account.ID) { + if c != nil { + c.Set("openai_ws_fallback_cooling", true) + } + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "ws_cooling", + Message: "openai ws fallback cooling", + }) + s.logOpenAIWSFallback(ctx, account, "fallback_cooling", nil) + } else { + wsResult, wsErr := s.forwardOpenAIWSV2( + ctx, + c, + account, + reqBody, + token, + wsDecision, + isCodexCLI, + reqStream, + originalModel, + mappedModel, + startTime, + ) + if wsErr == nil { + s.clearOpenAIWSFallbackCooling(account.ID) + return wsResult, nil + } + var fallbackErr *openAIWSFallbackError + if errors.As(wsErr, &fallbackErr) && (c == nil || c.Writer == nil || !c.Writer.Written()) { + s.markOpenAIWSFallbackCooling(account.ID, fallbackErr.Reason) + appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: 0, + Kind: "ws_fallback", + Message: fallbackErr.Reason, + }) + s.logOpenAIWSFallback(ctx, account, fallbackErr.Reason, fallbackErr.Err) + } else { + return nil, wsErr + } + } + } + // Build upstream request upstreamReq, err := s.buildUpstreamRequest(ctx, c, account, body, token, reqStream, promptCacheKey, isCodexCLI) if err != nil { @@ -1161,9 +1273,6 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco proxyURL = account.Proxy.URL() } - // Capture upstream request body for ops retry of this attempt. 
- setOpsUpstreamRequestBody(c, body) - // Send request upstreamStart := time.Now() resp, err := s.httpUpstream.Do(upstreamReq, proxyURL, account.ID, account.Concurrency) diff --git a/backend/internal/service/openai_ws_account_sticky_test.go b/backend/internal/service/openai_ws_account_sticky_test.go new file mode 100644 index 000000000..4b0587038 --- /dev/null +++ b/backend/internal/service/openai_ws_account_sticky_test.go @@ -0,0 +1,124 @@ +package service + +import ( + "context" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +func TestOpenAIGatewayService_SelectAccountByPreviousResponseID_Hit(t *testing.T) { + ctx := context.Background() + groupID := int64(23) + account := Account{ + ID: 2, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 2, + Extra: map[string]any{ + "openai_apikey_responses_websockets_v2_enabled": true, + }, + } + cache := &stubGatewayCache{} + store := NewOpenAIWSStateStore(cache) + cfg := newOpenAIWSV2TestConfig() + + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{account}}, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(stubConcurrencyCache{}), + openaiWSStateStore: store, + } + + require.NoError(t, store.BindResponseAccount(ctx, groupID, "resp_prev_1", account.ID, time.Hour)) + + selection, err := svc.SelectAccountByPreviousResponseID(ctx, &groupID, "resp_prev_1", "gpt-5.1", nil) + require.NoError(t, err) + require.NotNil(t, selection) + require.NotNil(t, selection.Account) + require.Equal(t, account.ID, selection.Account.ID) + require.True(t, selection.Acquired) + if selection.ReleaseFunc != nil { + selection.ReleaseFunc() + } +} + +func TestOpenAIGatewayService_SelectAccountByPreviousResponseID_Excluded(t *testing.T) { + ctx := context.Background() + groupID := int64(23) + account := Account{ + ID: 8, + Platform: PlatformOpenAI, + Type: 
AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Extra: map[string]any{ + "openai_apikey_responses_websockets_v2_enabled": true, + }, + } + cache := &stubGatewayCache{} + store := NewOpenAIWSStateStore(cache) + cfg := newOpenAIWSV2TestConfig() + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{account}}, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(stubConcurrencyCache{}), + openaiWSStateStore: store, + } + + require.NoError(t, store.BindResponseAccount(ctx, groupID, "resp_prev_2", account.ID, time.Hour)) + + selection, err := svc.SelectAccountByPreviousResponseID(ctx, &groupID, "resp_prev_2", "gpt-5.1", map[int64]struct{}{account.ID: {}}) + require.NoError(t, err) + require.Nil(t, selection) +} + +func TestOpenAIGatewayService_SelectAccountByPreviousResponseID_ForceHTTPIgnored(t *testing.T) { + ctx := context.Background() + groupID := int64(23) + account := Account{ + ID: 11, + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Extra: map[string]any{ + "openai_ws_force_http": true, + "responses_websockets_v2_enabled": true, + }, + } + cache := &stubGatewayCache{} + store := NewOpenAIWSStateStore(cache) + cfg := newOpenAIWSV2TestConfig() + svc := &OpenAIGatewayService{ + accountRepo: stubOpenAIAccountRepo{accounts: []Account{account}}, + cache: cache, + cfg: cfg, + concurrencyService: NewConcurrencyService(stubConcurrencyCache{}), + openaiWSStateStore: store, + } + + require.NoError(t, store.BindResponseAccount(ctx, groupID, "resp_prev_force_http", account.ID, time.Hour)) + + selection, err := svc.SelectAccountByPreviousResponseID(ctx, &groupID, "resp_prev_force_http", "gpt-5.1", nil) + require.NoError(t, err) + require.Nil(t, selection, "force_http 场景应忽略 previous_response_id 粘连") +} + +func newOpenAIWSV2TestConfig() *config.Config { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.Enabled = true + 
cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = 3600 + return cfg +} diff --git a/backend/internal/service/openai_ws_client.go b/backend/internal/service/openai_ws_client.go new file mode 100644 index 000000000..1fda7261d --- /dev/null +++ b/backend/internal/service/openai_ws_client.go @@ -0,0 +1,129 @@ +package service + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + + coderws "github.com/coder/websocket" + "github.com/coder/websocket/wsjson" +) + +// openAIWSClientConn 抽象 WS 客户端连接,便于替换底层实现。 +type openAIWSClientConn interface { + WriteJSON(ctx context.Context, value any) error + ReadMessage(ctx context.Context) ([]byte, error) + Ping(ctx context.Context) error + Close() error +} + +// openAIWSClientDialer 抽象 WS 建连器。 +type openAIWSClientDialer interface { + Dial(ctx context.Context, wsURL string, headers http.Header, proxyURL string) (openAIWSClientConn, int, http.Header, error) +} + +func newDefaultOpenAIWSClientDialer() openAIWSClientDialer { + return &coderOpenAIWSClientDialer{} +} + +type coderOpenAIWSClientDialer struct{} + +func (d *coderOpenAIWSClientDialer) Dial( + ctx context.Context, + wsURL string, + headers http.Header, + proxyURL string, +) (openAIWSClientConn, int, http.Header, error) { + targetURL := strings.TrimSpace(wsURL) + if targetURL == "" { + return nil, 0, nil, errors.New("ws url is empty") + } + + opts := &coderws.DialOptions{ + HTTPHeader: cloneHeader(headers), + } + if proxy := strings.TrimSpace(proxyURL); proxy != "" { + parsedProxyURL, err := url.Parse(proxy) + if err != nil { + return nil, 0, nil, fmt.Errorf("invalid proxy url: %w", err) + } + opts.HTTPClient = &http.Client{ + Transport: &http.Transport{ + Proxy: http.ProxyURL(parsedProxyURL), + }, + } + } + + conn, resp, err := coderws.Dial(ctx, targetURL, opts) + if err != nil { + status := 0 + respHeaders := 
http.Header(nil) + if resp != nil { + status = resp.StatusCode + respHeaders = cloneHeader(resp.Header) + } + return nil, status, respHeaders, err + } + respHeaders := http.Header(nil) + if resp != nil { + respHeaders = cloneHeader(resp.Header) + } + return &coderOpenAIWSClientConn{conn: conn}, 0, respHeaders, nil +} + +type coderOpenAIWSClientConn struct { + conn *coderws.Conn +} + +func (c *coderOpenAIWSClientConn) WriteJSON(ctx context.Context, value any) error { + if c == nil || c.conn == nil { + return errOpenAIWSConnClosed + } + if ctx == nil { + ctx = context.Background() + } + return wsjson.Write(ctx, c.conn, value) +} + +func (c *coderOpenAIWSClientConn) ReadMessage(ctx context.Context) ([]byte, error) { + if c == nil || c.conn == nil { + return nil, errOpenAIWSConnClosed + } + if ctx == nil { + ctx = context.Background() + } + + msgType, payload, err := c.conn.Read(ctx) + if err != nil { + return nil, err + } + switch msgType { + case coderws.MessageText, coderws.MessageBinary: + return payload, nil + default: + return nil, errOpenAIWSConnClosed + } +} + +func (c *coderOpenAIWSClientConn) Ping(ctx context.Context) error { + if c == nil || c.conn == nil { + return errOpenAIWSConnClosed + } + if ctx == nil { + ctx = context.Background() + } + return c.conn.Ping(ctx) +} + +func (c *coderOpenAIWSClientConn) Close() error { + if c == nil || c.conn == nil { + return nil + } + // Close 为幂等,忽略重复关闭错误。 + _ = c.conn.Close(coderws.StatusNormalClosure, "") + _ = c.conn.CloseNow() + return nil +} diff --git a/backend/internal/service/openai_ws_fallback_test.go b/backend/internal/service/openai_ws_fallback_test.go new file mode 100644 index 000000000..b68e2efa5 --- /dev/null +++ b/backend/internal/service/openai_ws_fallback_test.go @@ -0,0 +1,60 @@ +package service + +import ( + "errors" + "net/http" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +func TestClassifyOpenAIWSAcquireError(t *testing.T) { + 
t.Run("dial_426_upgrade_required", func(t *testing.T) { + err := &openAIWSDialError{StatusCode: 426, Err: errors.New("upgrade required")} + require.Equal(t, "upgrade_required", classifyOpenAIWSAcquireError(err)) + }) + + t.Run("queue_full", func(t *testing.T) { + require.Equal(t, "conn_queue_full", classifyOpenAIWSAcquireError(errOpenAIWSConnQueueFull)) + }) + + t.Run("other", func(t *testing.T) { + require.Equal(t, "acquire_conn", classifyOpenAIWSAcquireError(errors.New("x"))) + }) +} + +func TestClassifyOpenAIWSErrorEvent(t *testing.T) { + reason, recoverable := classifyOpenAIWSErrorEvent([]byte(`{"type":"error","error":{"code":"upgrade_required","message":"Upgrade required"}}`)) + require.Equal(t, "upgrade_required", reason) + require.True(t, recoverable) + + reason, recoverable = classifyOpenAIWSErrorEvent([]byte(`{"type":"error","error":{"code":"previous_response_not_found","message":"not found"}}`)) + require.Equal(t, "event_error", reason) + require.False(t, recoverable) +} + +func TestOpenAIWSErrorHTTPStatus(t *testing.T) { + require.Equal(t, http.StatusBadRequest, openAIWSErrorHTTPStatus([]byte(`{"type":"error","error":{"type":"invalid_request_error","code":"invalid_request","message":"invalid input"}}`))) + require.Equal(t, http.StatusUnauthorized, openAIWSErrorHTTPStatus([]byte(`{"type":"error","error":{"type":"authentication_error","code":"invalid_api_key","message":"auth failed"}}`))) + require.Equal(t, http.StatusForbidden, openAIWSErrorHTTPStatus([]byte(`{"type":"error","error":{"type":"permission_error","code":"forbidden","message":"forbidden"}}`))) + require.Equal(t, http.StatusTooManyRequests, openAIWSErrorHTTPStatus([]byte(`{"type":"error","error":{"type":"rate_limit_error","code":"rate_limit_exceeded","message":"rate limited"}}`))) + require.Equal(t, http.StatusBadGateway, openAIWSErrorHTTPStatus([]byte(`{"type":"error","error":{"type":"server_error","code":"server_error","message":"server"}}`))) +} + +func TestOpenAIWSFallbackCooling(t 
*testing.T) { + svc := &OpenAIGatewayService{cfg: &config.Config{}} + svc.cfg.Gateway.OpenAIWS.FallbackCooldownSeconds = 1 + + require.False(t, svc.isOpenAIWSFallbackCooling(1)) + svc.markOpenAIWSFallbackCooling(1, "upgrade_required") + require.True(t, svc.isOpenAIWSFallbackCooling(1)) + + svc.clearOpenAIWSFallbackCooling(1) + require.False(t, svc.isOpenAIWSFallbackCooling(1)) + + svc.markOpenAIWSFallbackCooling(2, "x") + time.Sleep(1200 * time.Millisecond) + require.False(t, svc.isOpenAIWSFallbackCooling(2)) +} diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go new file mode 100644 index 000000000..8f1c84c95 --- /dev/null +++ b/backend/internal/service/openai_ws_forwarder.go @@ -0,0 +1,959 @@ +package service + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "net/http" + "net/url" + "strings" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/logger" + "github.com/Wei-Shaw/sub2api/internal/pkg/openai" + "github.com/Wei-Shaw/sub2api/internal/util/responseheaders" + "github.com/gin-gonic/gin" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" + "go.uber.org/zap" +) + +const ( + openAIWSBetaV1Value = "responses_websockets=2026-02-04" + openAIWSBetaV2Value = "responses_websockets=2026-02-06" + + openAIWSTurnStateHeader = "x-codex-turn-state" + openAIWSTurnMetadataHeader = "x-codex-turn-metadata" +) + +// openAIWSFallbackError 表示可安全回退到 HTTP 的 WS 错误(尚未写下游)。 +type openAIWSFallbackError struct { + Reason string + Err error +} + +func (e *openAIWSFallbackError) Error() string { + if e == nil { + return "" + } + if e.Err == nil { + return fmt.Sprintf("openai ws fallback: %s", strings.TrimSpace(e.Reason)) + } + return fmt.Sprintf("openai ws fallback: %s: %v", strings.TrimSpace(e.Reason), e.Err) +} + +func (e *openAIWSFallbackError) Unwrap() error { + if e == nil { + return nil + } + return e.Err +} + +func wrapOpenAIWSFallback(reason string, err error) error { + return 
&openAIWSFallbackError{Reason: strings.TrimSpace(reason), Err: err} +} + +func (s *OpenAIGatewayService) getOpenAIWSConnPool() *openAIWSConnPool { + if s == nil { + return nil + } + s.openaiWSInitMu.Lock() + defer s.openaiWSInitMu.Unlock() + if s.openaiWSPool == nil { + s.openaiWSPool = newOpenAIWSConnPool(s.cfg) + } + return s.openaiWSPool +} + +func (s *OpenAIGatewayService) SnapshotOpenAIWSPoolMetrics() OpenAIWSPoolMetricsSnapshot { + pool := s.getOpenAIWSConnPool() + if pool == nil { + return OpenAIWSPoolMetricsSnapshot{} + } + return pool.SnapshotMetrics() +} + +func (s *OpenAIGatewayService) getOpenAIWSStateStore() OpenAIWSStateStore { + if s == nil { + return nil + } + s.openaiWSInitMu.Lock() + defer s.openaiWSInitMu.Unlock() + if s.openaiWSStateStore == nil { + s.openaiWSStateStore = NewOpenAIWSStateStore(s.cache) + } + return s.openaiWSStateStore +} + +func (s *OpenAIGatewayService) openAIWSResponseStickyTTL() time.Duration { + if s != nil && s.cfg != nil { + seconds := s.cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds + if seconds > 0 { + return time.Duration(seconds) * time.Second + } + } + return time.Hour +} + +func (s *OpenAIGatewayService) openAIWSReadTimeout() time.Duration { + if s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.ReadTimeoutSeconds > 0 { + return time.Duration(s.cfg.Gateway.OpenAIWS.ReadTimeoutSeconds) * time.Second + } + return 15 * time.Minute +} + +func (s *OpenAIGatewayService) openAIWSWriteTimeout() time.Duration { + if s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.WriteTimeoutSeconds > 0 { + return time.Duration(s.cfg.Gateway.OpenAIWS.WriteTimeoutSeconds) * time.Second + } + return 2 * time.Minute +} + +func (s *OpenAIGatewayService) openAIWSDialTimeout() time.Duration { + if s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.DialTimeoutSeconds > 0 { + return time.Duration(s.cfg.Gateway.OpenAIWS.DialTimeoutSeconds) * time.Second + } + return 10 * time.Second +} + +func (s *OpenAIGatewayService) 
buildOpenAIResponsesWSURL(account *Account) (string, error) { + if account == nil { + return "", errors.New("account is nil") + } + var targetURL string + switch account.Type { + case AccountTypeOAuth: + targetURL = chatgptCodexURL + case AccountTypeAPIKey: + baseURL := account.GetOpenAIBaseURL() + if baseURL == "" { + targetURL = openaiPlatformAPIURL + } else { + validatedURL, err := s.validateUpstreamBaseURL(baseURL) + if err != nil { + return "", err + } + targetURL = buildOpenAIResponsesURL(validatedURL) + } + default: + targetURL = openaiPlatformAPIURL + } + + parsed, err := url.Parse(strings.TrimSpace(targetURL)) + if err != nil { + return "", fmt.Errorf("invalid target url: %w", err) + } + switch strings.ToLower(parsed.Scheme) { + case "https": + parsed.Scheme = "wss" + case "http": + parsed.Scheme = "ws" + case "wss", "ws": + // 保持不变 + default: + return "", fmt.Errorf("unsupported scheme for ws: %s", parsed.Scheme) + } + return parsed.String(), nil +} + +func (s *OpenAIGatewayService) buildOpenAIWSHeaders( + c *gin.Context, + account *Account, + token string, + decision OpenAIWSProtocolDecision, + isCodexCLI bool, + turnState string, + turnMetadata string, +) http.Header { + headers := make(http.Header) + headers.Set("authorization", "Bearer "+token) + + if c != nil && c.Request != nil { + if v := strings.TrimSpace(c.Request.Header.Get("accept-language")); v != "" { + headers.Set("accept-language", v) + } + } + if state := strings.TrimSpace(turnState); state != "" { + headers.Set(openAIWSTurnStateHeader, state) + } + if metadata := strings.TrimSpace(turnMetadata); metadata != "" { + headers.Set(openAIWSTurnMetadataHeader, metadata) + } + + if account != nil && account.Type == AccountTypeOAuth { + if chatgptAccountID := account.GetChatGPTAccountID(); chatgptAccountID != "" { + headers.Set("chatgpt-account-id", chatgptAccountID) + } + if isCodexCLI { + headers.Set("originator", "codex_cli_rs") + } else { + headers.Set("originator", "opencode") + } + } + + 
betaValue := openAIWSBetaV2Value + if decision.Transport == OpenAIUpstreamTransportResponsesWebsocket { + betaValue = openAIWSBetaV1Value + } + if account != nil && account.Type == AccountTypeOAuth { + headers.Set("OpenAI-Beta", betaValue+",responses=experimental") + } else { + headers.Set("OpenAI-Beta", betaValue) + } + + customUA := "" + if account != nil { + customUA = account.GetOpenAIUserAgent() + } + if strings.TrimSpace(customUA) != "" { + headers.Set("user-agent", customUA) + } else if c != nil { + if ua := strings.TrimSpace(c.GetHeader("User-Agent")); ua != "" { + headers.Set("user-agent", ua) + } + } + if s != nil && s.cfg != nil && s.cfg.Gateway.ForceCodexCLI { + headers.Set("user-agent", codexCLIUserAgent) + } + if account != nil && account.Type == AccountTypeOAuth && !openai.IsCodexCLIRequest(headers.Get("user-agent")) { + headers.Set("user-agent", codexCLIUserAgent) + } + + return headers +} + +func (s *OpenAIGatewayService) buildOpenAIWSCreatePayload(reqBody map[string]any, account *Account) map[string]any { + payload := make(map[string]any, len(reqBody)+1) + for k, v := range reqBody { + payload[k] = v + } + + delete(payload, "stream") + delete(payload, "background") + payload["type"] = "response.create" + + // OAuth 默认保持 store=false,避免误依赖服务端历史。 + if account != nil && account.Type == AccountTypeOAuth && !s.isOpenAIWSStoreRecoveryAllowed(account) { + payload["store"] = false + } + return payload +} + +func setOpenAIWSTurnMetadata(payload map[string]any, turnMetadata string) { + if len(payload) == 0 { + return + } + metadata := strings.TrimSpace(turnMetadata) + if metadata == "" { + return + } + + switch existing := payload["client_metadata"].(type) { + case map[string]any: + existing[openAIWSTurnMetadataHeader] = metadata + payload["client_metadata"] = existing + case map[string]string: + next := make(map[string]any, len(existing)+1) + for k, v := range existing { + next[k] = v + } + next[openAIWSTurnMetadataHeader] = metadata + 
payload["client_metadata"] = next + default: + payload["client_metadata"] = map[string]any{ + openAIWSTurnMetadataHeader: metadata, + } + } +} + +func (s *OpenAIGatewayService) isOpenAIWSStoreRecoveryAllowed(account *Account) bool { + if account != nil && account.IsOpenAIWSAllowStoreRecoveryEnabled() { + return true + } + if s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.AllowStoreRecovery { + return true + } + return false +} + +func (s *OpenAIGatewayService) forwardOpenAIWSV2( + ctx context.Context, + c *gin.Context, + account *Account, + reqBody map[string]any, + token string, + decision OpenAIWSProtocolDecision, + isCodexCLI bool, + reqStream bool, + originalModel string, + mappedModel string, + startTime time.Time, +) (*OpenAIForwardResult, error) { + if s == nil || account == nil { + return nil, wrapOpenAIWSFallback("invalid_state", errors.New("service or account is nil")) + } + + wsURL, err := s.buildOpenAIResponsesWSURL(account) + if err != nil { + return nil, wrapOpenAIWSFallback("build_ws_url", err) + } + + payload := s.buildOpenAIWSCreatePayload(reqBody, account) + previousResponseID := strings.TrimSpace(gjson.Get(payloadAsJSON(payload), "previous_response_id").String()) + + stateStore := s.getOpenAIWSStateStore() + groupID := getOpenAIGroupIDFromContext(c) + sessionHash := s.GenerateSessionHash(c, []byte(payloadAsJSON(payload))) + turnState := "" + turnMetadata := "" + if c != nil && c.Request != nil { + turnState = strings.TrimSpace(c.GetHeader(openAIWSTurnStateHeader)) + turnMetadata = strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)) + } + setOpenAIWSTurnMetadata(payload, turnMetadata) + if turnState == "" && stateStore != nil && sessionHash != "" { + if savedTurnState, ok := stateStore.GetSessionTurnState(groupID, sessionHash); ok { + turnState = savedTurnState + } + } + preferredConnID := "" + if stateStore != nil && previousResponseID != "" { + if connID, ok := stateStore.GetResponseConn(previousResponseID); ok { + preferredConnID = 
connID + } + } + + acquireCtx, acquireCancel := context.WithTimeout(ctx, s.openAIWSDialTimeout()+s.openAIWSWriteTimeout()) + defer acquireCancel() + + lease, err := s.getOpenAIWSConnPool().Acquire(acquireCtx, openAIWSAcquireRequest{ + Account: account, + WSURL: wsURL, + Headers: s.buildOpenAIWSHeaders(c, account, token, decision, isCodexCLI, turnState, turnMetadata), + PreferredConnID: preferredConnID, + ProxyURL: func() string { + if account.ProxyID != nil && account.Proxy != nil { + return account.Proxy.URL() + } + return "" + }(), + }) + if err != nil { + return nil, wrapOpenAIWSFallback(classifyOpenAIWSAcquireError(err), err) + } + defer lease.Release() + if c != nil { + SetOpsLatencyMs(c, OpsOpenAIWSQueueWaitMsKey, lease.QueueWaitDuration().Milliseconds()) + c.Set(OpsOpenAIWSConnReusedKey, lease.Reused()) + if connID := strings.TrimSpace(lease.ConnID()); connID != "" { + c.Set(OpsOpenAIWSConnIDKey, connID) + } + } + + handshakeTurnState := strings.TrimSpace(lease.HandshakeHeader(openAIWSTurnStateHeader)) + if handshakeTurnState != "" { + if stateStore != nil && sessionHash != "" { + stateStore.BindSessionTurnState(groupID, sessionHash, handshakeTurnState, s.openAIWSSessionStickyTTL()) + } + if c != nil { + c.Header(http.CanonicalHeaderKey(openAIWSTurnStateHeader), handshakeTurnState) + } + } + + if err := s.performOpenAIWSGeneratePrewarm( + ctx, + lease, + decision, + payload, + previousResponseID, + reqBody, + account, + stateStore, + groupID, + ); err != nil { + return nil, err + } + + if err := lease.WriteJSON(payload, s.openAIWSWriteTimeout()); err != nil { + lease.MarkBroken() + return nil, wrapOpenAIWSFallback("write_request", err) + } + + usage := &OpenAIUsage{} + var firstTokenMs *int + responseID := "" + var finalResponse []byte + wroteDownstream := false + needModelReplace := originalModel != mappedModel + + var flusher http.Flusher + if reqStream { + if s.cfg != nil { + responseheaders.WriteFilteredHeaders(c.Writer.Header(), http.Header{}, 
s.cfg.Security.ResponseHeaders) + } + c.Header("Content-Type", "text/event-stream") + c.Header("Cache-Control", "no-cache") + c.Header("Connection", "keep-alive") + c.Header("X-Accel-Buffering", "no") + f, ok := c.Writer.(http.Flusher) + if !ok { + lease.MarkBroken() + return nil, wrapOpenAIWSFallback("streaming_not_supported", errors.New("streaming not supported")) + } + flusher = f + } + + clientDisconnected := false + for { + message, readErr := lease.ReadMessage(s.openAIWSReadTimeout()) + if readErr != nil { + lease.MarkBroken() + if !wroteDownstream { + return nil, wrapOpenAIWSFallback("read_event", readErr) + } + if clientDisconnected { + break + } + setOpsUpstreamError(c, 0, sanitizeUpstreamErrorMessage(readErr.Error()), "") + return nil, fmt.Errorf("openai ws read event: %w", readErr) + } + + eventType := strings.TrimSpace(gjson.GetBytes(message, "type").String()) + if eventType == "" { + continue + } + + if responseID == "" { + responseID = extractOpenAIWSResponseID(message) + } + + if firstTokenMs == nil && isOpenAIWSTokenEvent(eventType) { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + } + + if needModelReplace { + message = replaceOpenAIWSMessageModel(message, mappedModel, originalModel) + } + if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEData(string(message)); changed { + message = []byte(corrected) + } + s.parseSSEUsage(string(message), usage) + + if eventType == "error" { + errMsg := strings.TrimSpace(gjson.GetBytes(message, "error.message").String()) + if errMsg == "" { + errMsg = "Upstream websocket error" + } + fallbackReason, canFallback := classifyOpenAIWSErrorEvent(message) + if !wroteDownstream && canFallback { + return nil, wrapOpenAIWSFallback(fallbackReason, errors.New(errMsg)) + } + statusCode := openAIWSErrorHTTPStatus(message) + setOpsUpstreamError(c, statusCode, errMsg, "") + if reqStream && !clientDisconnected { + if _, wErr := fmt.Fprintf(c.Writer, "data: %s\n\n", message); wErr == nil { + 
flusher.Flush() + } + } + if !reqStream { + c.JSON(statusCode, gin.H{ + "error": gin.H{ + "type": "upstream_error", + "message": errMsg, + }, + }) + } + return nil, fmt.Errorf("openai ws error event: %s", errMsg) + } + + if reqStream { + if !clientDisconnected { + if _, wErr := fmt.Fprintf(c.Writer, "data: %s\n\n", message); wErr == nil { + wroteDownstream = true + flusher.Flush() + } else { + clientDisconnected = true + logger.LegacyPrintf("service.openai_gateway", "[OpenAI WS] client disconnected, continue draining upstream: account=%d", account.ID) + } + } + } else { + if response := gjson.GetBytes(message, "response"); response.Exists() && response.Type == gjson.JSON { + finalResponse = []byte(response.Raw) + } + } + + if isOpenAIWSTerminalEvent(eventType) { + if !reqStream { + if len(finalResponse) == 0 { + if resp := gjson.GetBytes(message, "response"); resp.Exists() && resp.Type == gjson.JSON { + finalResponse = []byte(resp.Raw) + } + } + } + break + } + } + + if !reqStream { + if len(finalResponse) == 0 { + if !wroteDownstream { + return nil, wrapOpenAIWSFallback("missing_final_response", errors.New("no terminal response payload")) + } + return nil, errors.New("ws finished without final response") + } + + if needModelReplace { + finalResponse = s.replaceModelInResponseBody(finalResponse, mappedModel, originalModel) + } + finalResponse = s.correctToolCallsInResponseBody(finalResponse) + populateOpenAIUsageFromResponseJSON(finalResponse, usage) + if responseID == "" { + responseID = strings.TrimSpace(gjson.GetBytes(finalResponse, "id").String()) + } + + c.Data(http.StatusOK, "application/json", finalResponse) + } + + if responseID != "" && stateStore != nil { + ttl := s.openAIWSResponseStickyTTL() + _ = stateStore.BindResponseAccount(ctx, groupID, responseID, account.ID, ttl) + stateStore.BindResponseConn(responseID, lease.ConnID(), ttl) + } + + return &OpenAIForwardResult{ + RequestID: responseID, + Usage: *usage, + Model: originalModel, + ReasoningEffort: 
extractOpenAIReasoningEffort(reqBody, originalModel), + Stream: reqStream, + Duration: time.Since(startTime), + FirstTokenMs: firstTokenMs, + }, nil +} + +func (s *OpenAIGatewayService) isOpenAIWSGeneratePrewarmEnabled() bool { + return s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.PrewarmGenerateEnabled +} + +// performOpenAIWSGeneratePrewarm 在 WSv2 下执行可选的 generate=false 预热。 +// 预热默认关闭,仅在配置开启后生效;失败时按可恢复错误回退到 HTTP。 +func (s *OpenAIGatewayService) performOpenAIWSGeneratePrewarm( + ctx context.Context, + lease *openAIWSConnLease, + decision OpenAIWSProtocolDecision, + payload map[string]any, + previousResponseID string, + reqBody map[string]any, + account *Account, + stateStore OpenAIWSStateStore, + groupID int64, +) error { + if s == nil || lease == nil || account == nil { + return nil + } + if !s.isOpenAIWSGeneratePrewarmEnabled() { + return nil + } + if decision.Transport != OpenAIUpstreamTransportResponsesWebsocketV2 { + return nil + } + if strings.TrimSpace(previousResponseID) != "" { + return nil + } + if lease.IsPrewarmed() { + return nil + } + if NeedsToolContinuation(reqBody) { + return nil + } + + prewarmPayload := make(map[string]any, len(payload)+1) + for k, v := range payload { + prewarmPayload[k] = v + } + prewarmPayload["generate"] = false + + if err := lease.WriteJSON(prewarmPayload, s.openAIWSWriteTimeout()); err != nil { + lease.MarkBroken() + return wrapOpenAIWSFallback("prewarm_write", err) + } + + prewarmResponseID := "" + for { + message, readErr := lease.ReadMessage(s.openAIWSReadTimeout()) + if readErr != nil { + lease.MarkBroken() + return wrapOpenAIWSFallback("prewarm_read_event", readErr) + } + + eventType := strings.TrimSpace(gjson.GetBytes(message, "type").String()) + if eventType == "" { + continue + } + if prewarmResponseID == "" { + prewarmResponseID = extractOpenAIWSResponseID(message) + } + + if eventType == "error" { + errMsg := strings.TrimSpace(gjson.GetBytes(message, "error.message").String()) + if errMsg == "" { + errMsg = 
"OpenAI websocket prewarm error" + } + fallbackReason, canFallback := classifyOpenAIWSErrorEvent(message) + lease.MarkBroken() + if canFallback { + return wrapOpenAIWSFallback("prewarm_"+fallbackReason, errors.New(errMsg)) + } + return wrapOpenAIWSFallback("prewarm_error_event", errors.New(errMsg)) + } + + if isOpenAIWSTerminalEvent(eventType) { + break + } + } + + lease.MarkPrewarmed() + if prewarmResponseID != "" && stateStore != nil { + ttl := s.openAIWSResponseStickyTTL() + _ = stateStore.BindResponseAccount(ctx, groupID, prewarmResponseID, account.ID, ttl) + stateStore.BindResponseConn(prewarmResponseID, lease.ConnID(), ttl) + } + return nil +} + +func payloadAsJSON(payload map[string]any) string { + if len(payload) == 0 { + return "{}" + } + body, err := json.Marshal(payload) + if err != nil { + return "{}" + } + return string(body) +} + +func extractOpenAIWSResponseID(message []byte) string { + if id := strings.TrimSpace(gjson.GetBytes(message, "response.id").String()); id != "" { + return id + } + if id := strings.TrimSpace(gjson.GetBytes(message, "id").String()); id != "" { + return id + } + return "" +} + +func isOpenAIWSTerminalEvent(eventType string) bool { + switch strings.TrimSpace(eventType) { + case "response.completed", "response.done", "response.failed", "response.incomplete", "response.cancelled", "response.canceled": + return true + default: + return false + } +} + +func isOpenAIWSTokenEvent(eventType string) bool { + eventType = strings.TrimSpace(eventType) + if eventType == "" { + return false + } + switch eventType { + case "response.created", "response.in_progress", "response.output_item.added", "response.output_item.done": + return false + } + if strings.Contains(eventType, ".delta") { + return true + } + if strings.HasPrefix(eventType, "response.output_text") { + return true + } + if strings.HasPrefix(eventType, "response.output") { + return true + } + return eventType == "response.completed" || eventType == "response.done" +} + +func 
replaceOpenAIWSMessageModel(message []byte, fromModel, toModel string) []byte { + if len(message) == 0 { + return message + } + updated := message + if m := gjson.GetBytes(updated, "model"); m.Exists() && m.Str == fromModel { + if next, err := sjson.SetBytes(updated, "model", toModel); err == nil { + updated = next + } + } + if m := gjson.GetBytes(updated, "response.model"); m.Exists() && m.Str == fromModel { + if next, err := sjson.SetBytes(updated, "response.model", toModel); err == nil { + updated = next + } + } + return updated +} + +func populateOpenAIUsageFromResponseJSON(body []byte, usage *OpenAIUsage) { + if usage == nil || len(body) == 0 { + return + } + usage.InputTokens = int(gjson.GetBytes(body, "usage.input_tokens").Int()) + usage.OutputTokens = int(gjson.GetBytes(body, "usage.output_tokens").Int()) + usage.CacheReadInputTokens = int(gjson.GetBytes(body, "usage.input_tokens_details.cached_tokens").Int()) +} + +func getOpenAIGroupIDFromContext(c *gin.Context) int64 { + if c == nil { + return 0 + } + value, exists := c.Get("api_key") + if !exists { + return 0 + } + apiKey, ok := value.(*APIKey) + if !ok || apiKey == nil || apiKey.GroupID == nil { + return 0 + } + return *apiKey.GroupID +} + +// SelectAccountByPreviousResponseID 按 previous_response_id 命中账号粘连。 +// 未命中或账号不可用时返回 (nil, nil),由调用方继续走常规调度。 +func (s *OpenAIGatewayService) SelectAccountByPreviousResponseID( + ctx context.Context, + groupID *int64, + previousResponseID string, + requestedModel string, + excludedIDs map[int64]struct{}, +) (*AccountSelectionResult, error) { + if s == nil { + return nil, nil + } + responseID := strings.TrimSpace(previousResponseID) + if responseID == "" { + return nil, nil + } + store := s.getOpenAIWSStateStore() + if store == nil { + return nil, nil + } + + accountID, err := store.GetResponseAccount(ctx, derefGroupID(groupID), responseID) + if err != nil || accountID <= 0 { + return nil, nil + } + if excludedIDs != nil { + if _, excluded := excludedIDs[accountID]; 
excluded { + return nil, nil + } + } + + account, err := s.getSchedulableAccount(ctx, accountID) + if err != nil || account == nil { + _ = store.DeleteResponseAccount(ctx, derefGroupID(groupID), responseID) + return nil, nil + } + // 非 WSv2 场景(如 force_http/全局关闭)不应使用 previous_response_id 粘连, + // 以保持“回滚到 HTTP”后的历史行为一致性。 + if s.getOpenAIWSProtocolResolver().Resolve(account).Transport != OpenAIUpstreamTransportResponsesWebsocketV2 { + return nil, nil + } + if shouldClearStickySession(account, requestedModel) || !account.IsOpenAI() { + _ = store.DeleteResponseAccount(ctx, derefGroupID(groupID), responseID) + return nil, nil + } + if requestedModel != "" && !account.IsModelSupported(requestedModel) { + return nil, nil + } + + result, acquireErr := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) + if acquireErr == nil && result.Acquired { + _ = store.BindResponseAccount(ctx, derefGroupID(groupID), responseID, accountID, s.openAIWSResponseStickyTTL()) + return &AccountSelectionResult{ + Account: account, + Acquired: true, + ReleaseFunc: result.ReleaseFunc, + }, nil + } + + cfg := s.schedulingConfig() + if s.concurrencyService != nil { + waitingCount, _ := s.concurrencyService.GetAccountWaitingCount(ctx, accountID) + if waitingCount < cfg.StickySessionMaxWaiting { + return &AccountSelectionResult{ + Account: account, + WaitPlan: &AccountWaitPlan{ + AccountID: accountID, + MaxConcurrency: account.Concurrency, + Timeout: cfg.StickySessionWaitTimeout, + MaxWaiting: cfg.StickySessionMaxWaiting, + }, + }, nil + } + } + return nil, nil +} + +func (s *OpenAIGatewayService) logOpenAIWSFallback(ctx context.Context, account *Account, reason string, err error) { + if s == nil { + return + } + fields := []zap.Field{ + zap.String("component", "service.openai_gateway"), + zap.String("reason", strings.TrimSpace(reason)), + } + if account != nil { + fields = append(fields, + zap.Int64("account_id", account.ID), + zap.String("account_type", string(account.Type)), + ) + } + if 
err != nil { + fields = append(fields, zap.Error(err)) + } + logger.FromContext(ctx).With(fields...).Warn("OpenAI WS 回退到 HTTP") +} + +func classifyOpenAIWSAcquireError(err error) string { + if err == nil { + return "acquire_conn" + } + var dialErr *openAIWSDialError + if errors.As(err, &dialErr) { + switch dialErr.StatusCode { + case 426: + return "upgrade_required" + case 401, 403: + return "auth_failed" + case 429: + return "upstream_rate_limited" + } + if dialErr.StatusCode >= 500 { + return "upstream_5xx" + } + return "dial_failed" + } + if errors.Is(err, errOpenAIWSConnQueueFull) { + return "conn_queue_full" + } + if errors.Is(err, context.DeadlineExceeded) { + return "acquire_timeout" + } + return "acquire_conn" +} + +func classifyOpenAIWSErrorEvent(message []byte) (string, bool) { + if len(message) == 0 { + return "event_error", false + } + code := strings.ToLower(strings.TrimSpace(gjson.GetBytes(message, "error.code").String())) + errType := strings.ToLower(strings.TrimSpace(gjson.GetBytes(message, "error.type").String())) + msg := strings.ToLower(strings.TrimSpace(gjson.GetBytes(message, "error.message").String())) + + switch code { + case "upgrade_required": + return "upgrade_required", true + case "websocket_not_supported", "websocket_unsupported": + return "ws_unsupported", true + } + if strings.Contains(msg, "upgrade required") || strings.Contains(msg, "status 426") { + return "upgrade_required", true + } + if strings.Contains(errType, "upgrade") { + return "upgrade_required", true + } + if strings.Contains(msg, "websocket") && strings.Contains(msg, "unsupported") { + return "ws_unsupported", true + } + if strings.Contains(errType, "server_error") || strings.Contains(code, "server_error") { + return "upstream_error_event", true + } + return "event_error", false +} + +func openAIWSErrorHTTPStatus(message []byte) int { + if len(message) == 0 { + return http.StatusBadGateway + } + code := strings.ToLower(strings.TrimSpace(gjson.GetBytes(message, 
"error.code").String())) + errType := strings.ToLower(strings.TrimSpace(gjson.GetBytes(message, "error.type").String())) + + switch { + case strings.Contains(errType, "invalid_request"), + strings.Contains(code, "invalid_request"), + strings.Contains(code, "bad_request"), + code == "previous_response_not_found": + return http.StatusBadRequest + case strings.Contains(errType, "authentication"), + strings.Contains(code, "invalid_api_key"), + strings.Contains(code, "unauthorized"): + return http.StatusUnauthorized + case strings.Contains(errType, "permission"), + strings.Contains(code, "forbidden"): + return http.StatusForbidden + case strings.Contains(errType, "rate_limit"), + strings.Contains(code, "rate_limit"), + strings.Contains(code, "insufficient_quota"): + return http.StatusTooManyRequests + default: + return http.StatusBadGateway + } +} + +func (s *OpenAIGatewayService) openAIWSFallbackCooldown() time.Duration { + if s == nil || s.cfg == nil { + return 30 * time.Second + } + seconds := s.cfg.Gateway.OpenAIWS.FallbackCooldownSeconds + if seconds <= 0 { + return 0 + } + return time.Duration(seconds) * time.Second +} + +func (s *OpenAIGatewayService) isOpenAIWSFallbackCooling(accountID int64) bool { + if s == nil || accountID <= 0 { + return false + } + cooldown := s.openAIWSFallbackCooldown() + if cooldown <= 0 { + return false + } + s.openaiWSFallbackMu.Lock() + defer s.openaiWSFallbackMu.Unlock() + until := s.openaiWSFallbackUntil[accountID] + if until.IsZero() { + return false + } + if time.Now().Before(until) { + return true + } + delete(s.openaiWSFallbackUntil, accountID) + return false +} + +func (s *OpenAIGatewayService) markOpenAIWSFallbackCooling(accountID int64, _ string) { + if s == nil || accountID <= 0 { + return + } + cooldown := s.openAIWSFallbackCooldown() + if cooldown <= 0 { + return + } + s.openaiWSFallbackMu.Lock() + if s.openaiWSFallbackUntil == nil { + s.openaiWSFallbackUntil = make(map[int64]time.Time, 32) + } + 
s.openaiWSFallbackUntil[accountID] = time.Now().Add(cooldown) + s.openaiWSFallbackMu.Unlock() +} + +func (s *OpenAIGatewayService) clearOpenAIWSFallbackCooling(accountID int64) { + if s == nil || accountID <= 0 { + return + } + s.openaiWSFallbackMu.Lock() + if s.openaiWSFallbackUntil != nil { + delete(s.openaiWSFallbackUntil, accountID) + } + s.openaiWSFallbackMu.Unlock() +} diff --git a/backend/internal/service/openai_ws_forwarder_success_test.go b/backend/internal/service/openai_ws_forwarder_success_test.go new file mode 100644 index 000000000..ceb29928d --- /dev/null +++ b/backend/internal/service/openai_ws_forwarder_success_test.go @@ -0,0 +1,766 @@ +package service + +import ( + "context" + "encoding/json" + "io" + "net/http" + "net/http/httptest" + "strconv" + "strings" + "sync" + "sync/atomic" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/gin-gonic/gin" + "github.com/gorilla/websocket" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" +) + +func TestOpenAIGatewayService_Forward_WSv2_SuccessAndBindSticky(t *testing.T) { + gin.SetMode(gin.TestMode) + + type receivedPayload struct { + Type string + PreviousResponseID string + } + receivedCh := make(chan receivedPayload, 1) + + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + t.Errorf("upgrade websocket failed: %v", err) + return + } + defer func() { + _ = conn.Close() + }() + + var request map[string]any + if err := conn.ReadJSON(&request); err != nil { + t.Errorf("read ws request failed: %v", err) + return + } + receivedCh <- receivedPayload{ + Type: strings.TrimSpace(gjson.Get(requestToJSONString(request), "type").String()), + PreviousResponseID: strings.TrimSpace(gjson.Get(requestToJSONString(request), "previous_response_id").String()), + } + + if err := 
conn.WriteJSON(map[string]any{ + "type": "response.created", + "response": map[string]any{ + "id": "resp_new_1", + "model": "gpt-5.1", + }, + }); err != nil { + t.Errorf("write response.created failed: %v", err) + return + } + if err := conn.WriteJSON(map[string]any{ + "type": "response.completed", + "response": map[string]any{ + "id": "resp_new_1", + "model": "gpt-5.1", + "usage": map[string]any{ + "input_tokens": 12, + "output_tokens": 7, + "input_tokens_details": map[string]any{ + "cached_tokens": 3, + }, + }, + }, + }); err != nil { + t.Errorf("write response.completed failed: %v", err) + return + } + })) + defer wsServer.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + groupID := int64(1001) + c.Set("api_key", &APIKey{GroupID: &groupID}) + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 2 + cfg.Gateway.OpenAIWS.QueueLimitPerConn = 8 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.ReadTimeoutSeconds = 30 + cfg.Gateway.OpenAIWS.WriteTimeoutSeconds = 10 + cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds = 3600 + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader(`{"usage":{"input_tokens":1,"output_tokens":1}}`)), + }, + } + + cache := &stubGatewayCache{} + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + cache: cache, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + + 
account := &Account{ + ID: 9, + Name: "openai-ws", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 2, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_prev_1","input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, 12, result.Usage.InputTokens) + require.Equal(t, 7, result.Usage.OutputTokens) + require.Equal(t, 3, result.Usage.CacheReadInputTokens) + require.Equal(t, "resp_new_1", result.RequestID) + require.False(t, gjson.GetBytes(upstream.lastBody, "model").Exists(), "WSv2 成功时不应回落 HTTP 上游") + + received := <-receivedCh + require.Equal(t, "response.create", received.Type) + require.Equal(t, "resp_prev_1", received.PreviousResponseID) + + store := svc.getOpenAIWSStateStore() + mappedAccountID, getErr := store.GetResponseAccount(context.Background(), groupID, "resp_new_1") + require.NoError(t, getErr) + require.Equal(t, account.ID, mappedAccountID) + connID, ok := store.GetResponseConn("resp_new_1") + require.True(t, ok) + require.NotEmpty(t, connID) + + responseBody := rec.Body.Bytes() + require.Equal(t, "resp_new_1", gjson.GetBytes(responseBody, "id").String()) +} + +func requestToJSONString(payload map[string]any) string { + if len(payload) == 0 { + return "{}" + } + b, err := json.Marshal(payload) + if err != nil { + return "{}" + } + return string(b) +} + +func TestOpenAIGatewayService_Forward_WSv2_PoolReuseNotOneToOne(t *testing.T) { + gin.SetMode(gin.TestMode) + + var upgradeCount atomic.Int64 + var sequence atomic.Int64 + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + wsServer := httptest.NewServer(http.HandlerFunc(func(w 
http.ResponseWriter, r *http.Request) { + upgradeCount.Add(1) + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + t.Errorf("upgrade websocket failed: %v", err) + return + } + defer func() { + _ = conn.Close() + }() + + for { + var request map[string]any + if err := conn.ReadJSON(&request); err != nil { + return + } + idx := sequence.Add(1) + responseID := "resp_reuse_" + strconv.FormatInt(idx, 10) + if err := conn.WriteJSON(map[string]any{ + "type": "response.created", + "response": map[string]any{ + "id": responseID, + "model": "gpt-5.1", + }, + }); err != nil { + return + } + if err := conn.WriteJSON(map[string]any{ + "type": "response.completed", + "response": map[string]any{ + "id": responseID, + "model": "gpt-5.1", + "usage": map[string]any{ + "input_tokens": 2, + "output_tokens": 1, + }, + }, + }); err != nil { + return + } + } + })) + defer wsServer.Close() + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 2 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + cfg.Gateway.OpenAIWS.QueueLimitPerConn = 8 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.ReadTimeoutSeconds = 30 + cfg.Gateway.OpenAIWS.WriteTimeoutSeconds = 10 + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + account := &Account{ + ID: 19, + Name: "openai-ws", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 2, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: 
map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + for i := 0; i < 2; i++ { + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + groupID := int64(2001) + c.Set("api_key", &APIKey{GroupID: &groupID}) + + body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_prev_reuse","input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.True(t, strings.HasPrefix(result.RequestID, "resp_reuse_")) + } + + require.Equal(t, int64(1), upgradeCount.Load(), "多个客户端请求应复用账号连接池而不是 1:1 对等建链") + metrics := svc.SnapshotOpenAIWSPoolMetrics() + require.GreaterOrEqual(t, metrics.AcquireReuseTotal, int64(1)) +} + +func TestOpenAIGatewayService_Forward_WSv2_OAuthStoreFalseByDefault(t *testing.T) { + gin.SetMode(gin.TestMode) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.AllowStoreRecovery = false + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + + captureConn := &openAIWSCaptureConn{ + events: [][]byte{ + []byte(`{"type":"response.completed","response":{"id":"resp_oauth_1","model":"gpt-5.1","usage":{"input_tokens":3,"output_tokens":2}}}`), + }, + } + captureDialer := &openAIWSCaptureDialer{conn: 
captureConn} + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(captureDialer) + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + openaiWSPool: pool, + } + account := &Account{ + ID: 29, + Name: "openai-oauth", + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Credentials: map[string]any{ + "access_token": "oauth-token-1", + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"store":true,"input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, "resp_oauth_1", result.RequestID) + + require.NotNil(t, captureConn.lastWrite) + requestJSON := requestToJSONString(captureConn.lastWrite) + require.True(t, gjson.Get(requestJSON, "store").Exists(), "OAuth WSv2 应显式写入 store 字段") + require.False(t, gjson.Get(requestJSON, "store").Bool(), "默认策略应将 OAuth store 置为 false") + require.Contains(t, captureDialer.lastHeaders.Get("OpenAI-Beta"), "responses=experimental") +} + +func TestOpenAIGatewayService_Forward_WSv1_Unsupported(t *testing.T) { + gin.SetMode(gin.TestMode) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsockets = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = false + + 
upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader(`{"usage":{"input_tokens":1,"output_tokens":1}}`)), + }, + } + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + + account := &Account{ + ID: 39, + Name: "openai-ws-v1", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": "https://api.openai.com/v1/responses", + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_prev_v1","input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "ws v1") + require.Equal(t, http.StatusBadRequest, rec.Code) + require.Contains(t, rec.Body.String(), "WSv1") + require.Nil(t, upstream.lastReq, "WSv1 不支持时不应触发 HTTP 上游请求") +} + +func TestOpenAIGatewayService_Forward_WSv2_TurnStateAndMetadataReplayOnReconnect(t *testing.T) { + gin.SetMode(gin.TestMode) + + var connIndex atomic.Int64 + headersCh := make(chan http.Header, 4) + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + idx := connIndex.Add(1) + headersCh <- cloneHeader(r.Header) + + respHeader := http.Header{} + if idx == 1 { + respHeader.Set("x-codex-turn-state", "turn_state_first") + } + conn, err := upgrader.Upgrade(w, r, respHeader) + if err != nil { + t.Errorf("upgrade websocket failed: %v", err) + return + } + defer 
func() { + _ = conn.Close() + }() + + var request map[string]any + if err := conn.ReadJSON(&request); err != nil { + t.Errorf("read ws request failed: %v", err) + return + } + responseID := "resp_turn_" + strconv.FormatInt(idx, 10) + if err := conn.WriteJSON(map[string]any{ + "type": "response.completed", + "response": map[string]any{ + "id": responseID, + "model": "gpt-5.1", + "usage": map[string]any{ + "input_tokens": 2, + "output_tokens": 1, + }, + }, + }); err != nil { + t.Errorf("write response.completed failed: %v", err) + return + } + })) + defer wsServer.Close() + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 0 + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + + account := &Account{ + ID: 49, + Name: "openai-turn-state", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + reqBody := []byte(`{"model":"gpt-5.1","stream":false,"input":[{"type":"input_text","text":"hello"}]}`) + rec1 := httptest.NewRecorder() + c1, _ := gin.CreateTestContext(rec1) + c1.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c1.Request.Header.Set("session_id", "session_turn_state") + c1.Request.Header.Set("x-codex-turn-metadata", "turn_meta_1") + result1, err := 
svc.Forward(context.Background(), c1, account, reqBody) + require.NoError(t, err) + require.NotNil(t, result1) + + sessionHash := svc.GenerateSessionHash(c1, reqBody) + store := svc.getOpenAIWSStateStore() + turnState, ok := store.GetSessionTurnState(0, sessionHash) + require.True(t, ok) + require.Equal(t, "turn_state_first", turnState) + + // 主动淘汰连接,模拟下一次请求发生重连。 + connID, hasConn := store.GetResponseConn(result1.RequestID) + require.True(t, hasConn) + svc.getOpenAIWSConnPool().evictConn(account.ID, connID) + + rec2 := httptest.NewRecorder() + c2, _ := gin.CreateTestContext(rec2) + c2.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c2.Request.Header.Set("session_id", "session_turn_state") + c2.Request.Header.Set("x-codex-turn-metadata", "turn_meta_2") + result2, err := svc.Forward(context.Background(), c2, account, reqBody) + require.NoError(t, err) + require.NotNil(t, result2) + + firstHandshakeHeaders := <-headersCh + secondHandshakeHeaders := <-headersCh + require.Equal(t, "turn_meta_1", firstHandshakeHeaders.Get("X-Codex-Turn-Metadata")) + require.Equal(t, "turn_meta_2", secondHandshakeHeaders.Get("X-Codex-Turn-Metadata")) + require.Equal(t, "turn_state_first", secondHandshakeHeaders.Get("X-Codex-Turn-State")) +} + +func TestOpenAIGatewayService_Forward_WSv2_GeneratePrewarm(t *testing.T) { + gin.SetMode(gin.TestMode) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("session_id", "session-prewarm") + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.PrewarmGenerateEnabled = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + 
cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + + captureConn := &openAIWSCaptureConn{ + events: [][]byte{ + []byte(`{"type":"response.completed","response":{"id":"resp_prewarm_1","model":"gpt-5.1","usage":{"input_tokens":0,"output_tokens":0}}}`), + []byte(`{"type":"response.completed","response":{"id":"resp_main_1","model":"gpt-5.1","usage":{"input_tokens":4,"output_tokens":2}}}`), + }, + } + captureDialer := &openAIWSCaptureDialer{conn: captureConn} + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(captureDialer) + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + openaiWSPool: pool, + } + + account := &Account{ + ID: 59, + Name: "openai-prewarm", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, "resp_main_1", result.RequestID) + + require.Len(t, captureConn.writes, 2, "开启 generate=false 预热后应发送两次 WS 请求") + firstWrite := requestToJSONString(captureConn.writes[0]) + secondWrite := requestToJSONString(captureConn.writes[1]) + require.True(t, gjson.Get(firstWrite, "generate").Exists()) + require.False(t, gjson.Get(firstWrite, "generate").Bool()) + require.False(t, gjson.Get(secondWrite, "generate").Exists()) +} + +func TestOpenAIGatewayService_Forward_WSv2_TurnMetadataInPayloadOnConnReuse(t *testing.T) { + gin.SetMode(gin.TestMode) + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + 
cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + + captureConn := &openAIWSCaptureConn{ + events: [][]byte{ + []byte(`{"type":"response.completed","response":{"id":"resp_meta_1","model":"gpt-5.1","usage":{"input_tokens":1,"output_tokens":1}}}`), + []byte(`{"type":"response.completed","response":{"id":"resp_meta_2","model":"gpt-5.1","usage":{"input_tokens":1,"output_tokens":1}}}`), + }, + } + captureDialer := &openAIWSCaptureDialer{conn: captureConn} + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(captureDialer) + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + openaiWSPool: pool, + } + + account := &Account{ + ID: 69, + Name: "openai-turn-metadata", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"input":[{"type":"input_text","text":"hello"}]}`) + + rec1 := httptest.NewRecorder() + c1, _ := gin.CreateTestContext(rec1) + c1.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c1.Request.Header.Set("session_id", "session-metadata-reuse") + c1.Request.Header.Set("x-codex-turn-metadata", "turn_meta_payload_1") + result1, err := svc.Forward(context.Background(), c1, account, body) + require.NoError(t, err) + require.NotNil(t, result1) + require.Equal(t, "resp_meta_1", result1.RequestID) + + rec2 := 
httptest.NewRecorder() + c2, _ := gin.CreateTestContext(rec2) + c2.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c2.Request.Header.Set("session_id", "session-metadata-reuse") + c2.Request.Header.Set("x-codex-turn-metadata", "turn_meta_payload_2") + result2, err := svc.Forward(context.Background(), c2, account, body) + require.NoError(t, err) + require.NotNil(t, result2) + require.Equal(t, "resp_meta_2", result2.RequestID) + + require.Equal(t, 1, captureDialer.DialCount(), "同一账号两轮请求应复用同一 WS 连接") + require.Len(t, captureConn.writes, 2) + + firstWrite := requestToJSONString(captureConn.writes[0]) + secondWrite := requestToJSONString(captureConn.writes[1]) + require.Equal(t, "turn_meta_payload_1", gjson.Get(firstWrite, "client_metadata.x-codex-turn-metadata").String()) + require.Equal(t, "turn_meta_payload_2", gjson.Get(secondWrite, "client_metadata.x-codex-turn-metadata").String()) +} + +type openAIWSCaptureDialer struct { + mu sync.Mutex + conn *openAIWSCaptureConn + lastHeaders http.Header + handshake http.Header + dialCount int +} + +func (d *openAIWSCaptureDialer) Dial( + ctx context.Context, + wsURL string, + headers http.Header, + proxyURL string, +) (openAIWSClientConn, int, http.Header, error) { + _ = ctx + _ = wsURL + _ = proxyURL + d.mu.Lock() + d.lastHeaders = cloneHeader(headers) + d.dialCount++ + respHeaders := cloneHeader(d.handshake) + d.mu.Unlock() + return d.conn, 0, respHeaders, nil +} + +func (d *openAIWSCaptureDialer) DialCount() int { + d.mu.Lock() + defer d.mu.Unlock() + return d.dialCount +} + +type openAIWSCaptureConn struct { + mu sync.Mutex + events [][]byte + lastWrite map[string]any + writes []map[string]any + closed bool +} + +func (c *openAIWSCaptureConn) WriteJSON(ctx context.Context, value any) error { + _ = ctx + c.mu.Lock() + defer c.mu.Unlock() + if c.closed { + return errOpenAIWSConnClosed + } + if payload, ok := value.(map[string]any); ok { + c.lastWrite = cloneMapStringAny(payload) + c.writes = 
append(c.writes, cloneMapStringAny(payload)) + } + return nil +} + +func (c *openAIWSCaptureConn) ReadMessage(ctx context.Context) ([]byte, error) { + _ = ctx + c.mu.Lock() + defer c.mu.Unlock() + if c.closed { + return nil, errOpenAIWSConnClosed + } + if len(c.events) == 0 { + return nil, io.EOF + } + event := c.events[0] + c.events = c.events[1:] + return event, nil +} + +func (c *openAIWSCaptureConn) Ping(ctx context.Context) error { + _ = ctx + return nil +} + +func (c *openAIWSCaptureConn) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + c.closed = true + return nil +} + +func cloneMapStringAny(src map[string]any) map[string]any { + if src == nil { + return nil + } + dst := make(map[string]any, len(src)) + for k, v := range src { + dst[k] = v + } + return dst +} diff --git a/backend/internal/service/openai_ws_pool.go b/backend/internal/service/openai_ws_pool.go new file mode 100644 index 000000000..342fee1bd --- /dev/null +++ b/backend/internal/service/openai_ws_pool.go @@ -0,0 +1,1064 @@ +package service + +import ( + "context" + "errors" + "fmt" + "math" + "net/http" + "sort" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" +) + +const ( + openAIWSConnMaxAge = 60 * time.Minute + openAIWSConnHealthCheckIdle = 30 * time.Second + openAIWSConnHealthCheckTO = 2 * time.Second + openAIWSConnPrewarmExtraDelay = 2 * time.Second +) + +var ( + errOpenAIWSConnClosed = errors.New("openai ws connection closed") + errOpenAIWSConnQueueFull = errors.New("openai ws connection queue full") +) + +type openAIWSDialError struct { + StatusCode int + Err error +} + +func (e *openAIWSDialError) Error() string { + if e == nil { + return "" + } + if e.StatusCode > 0 { + return fmt.Sprintf("openai ws dial failed: status=%d err=%v", e.StatusCode, e.Err) + } + return fmt.Sprintf("openai ws dial failed: %v", e.Err) +} + +func (e *openAIWSDialError) Unwrap() error { + if e == nil { + return nil + } + return e.Err +} + +type 
openAIWSAcquireRequest struct { + Account *Account + WSURL string + Headers http.Header + ProxyURL string + PreferredConnID string +} + +type openAIWSConnLease struct { + pool *openAIWSConnPool + accountID int64 + conn *openAIWSConn + queueWait time.Duration + reused bool + released atomic.Bool +} + +func (l *openAIWSConnLease) ConnID() string { + if l == nil || l.conn == nil { + return "" + } + return l.conn.id +} + +func (l *openAIWSConnLease) QueueWaitDuration() time.Duration { + if l == nil { + return 0 + } + return l.queueWait +} + +func (l *openAIWSConnLease) Reused() bool { + if l == nil { + return false + } + return l.reused +} + +func (l *openAIWSConnLease) HandshakeHeader(name string) string { + if l == nil || l.conn == nil { + return "" + } + return l.conn.handshakeHeader(name) +} + +func (l *openAIWSConnLease) IsPrewarmed() bool { + if l == nil || l.conn == nil { + return false + } + return l.conn.isPrewarmed() +} + +func (l *openAIWSConnLease) MarkPrewarmed() { + if l == nil || l.conn == nil { + return + } + l.conn.markPrewarmed() +} + +func (l *openAIWSConnLease) WriteJSON(value any, timeout time.Duration) error { + if l == nil || l.conn == nil { + return errOpenAIWSConnClosed + } + return l.conn.writeJSONWithTimeout(value, timeout) +} + +func (l *openAIWSConnLease) ReadMessage(timeout time.Duration) ([]byte, error) { + if l == nil || l.conn == nil { + return nil, errOpenAIWSConnClosed + } + return l.conn.readMessageWithTimeout(timeout) +} + +func (l *openAIWSConnLease) MarkBroken() { + if l == nil || l.pool == nil || l.conn == nil { + return + } + l.pool.evictConn(l.accountID, l.conn.id) +} + +func (l *openAIWSConnLease) Release() { + if l == nil || l.conn == nil { + return + } + if !l.released.CompareAndSwap(false, true) { + return + } + l.conn.release() +} + +type openAIWSConn struct { + id string + ws openAIWSClientConn + + handshakeHeaders http.Header + + leaseCh chan struct{} + closedCh chan struct{} + closeOnce sync.Once + + ioMu sync.Mutex + + 
waiters atomic.Int32 + createdAtNano atomic.Int64 + lastUsedNano atomic.Int64 + prewarmed atomic.Bool +} + +func newOpenAIWSConn(id string, _ int64, ws openAIWSClientConn, handshakeHeaders http.Header) *openAIWSConn { + now := time.Now() + conn := &openAIWSConn{ + id: id, + ws: ws, + handshakeHeaders: cloneHeader(handshakeHeaders), + leaseCh: make(chan struct{}, 1), + closedCh: make(chan struct{}), + } + conn.leaseCh <- struct{}{} + conn.createdAtNano.Store(now.UnixNano()) + conn.lastUsedNano.Store(now.UnixNano()) + return conn +} + +func (c *openAIWSConn) tryAcquire() bool { + if c == nil { + return false + } + select { + case <-c.closedCh: + return false + default: + } + select { + case <-c.leaseCh: + select { + case <-c.closedCh: + c.release() + return false + default: + } + return true + default: + return false + } +} + +func (c *openAIWSConn) acquire(ctx context.Context) error { + if c == nil { + return errOpenAIWSConnClosed + } + for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.closedCh: + return errOpenAIWSConnClosed + case <-c.leaseCh: + select { + case <-c.closedCh: + c.release() + return errOpenAIWSConnClosed + default: + } + return nil + } + } +} + +func (c *openAIWSConn) release() { + if c == nil { + return + } + select { + case c.leaseCh <- struct{}{}: + default: + } + c.touch() +} + +func (c *openAIWSConn) close() { + if c == nil { + return + } + c.closeOnce.Do(func() { + close(c.closedCh) + if c.ws != nil { + _ = c.ws.Close() + } + select { + case c.leaseCh <- struct{}{}: + default: + } + }) +} + +func (c *openAIWSConn) writeJSONWithTimeout(value any, timeout time.Duration) error { + if c == nil { + return errOpenAIWSConnClosed + } + select { + case <-c.closedCh: + return errOpenAIWSConnClosed + default: + } + + c.ioMu.Lock() + defer c.ioMu.Unlock() + if c.ws == nil { + return errOpenAIWSConnClosed + } + writeCtx := context.Background() + if timeout > 0 { + var cancel context.CancelFunc + writeCtx, cancel = 
context.WithTimeout(writeCtx, timeout) + defer cancel() + } + if err := c.ws.WriteJSON(writeCtx, value); err != nil { + return err + } + c.touch() + return nil +} + +func (c *openAIWSConn) readMessageWithTimeout(timeout time.Duration) ([]byte, error) { + if c == nil { + return nil, errOpenAIWSConnClosed + } + select { + case <-c.closedCh: + return nil, errOpenAIWSConnClosed + default: + } + + c.ioMu.Lock() + defer c.ioMu.Unlock() + if c.ws == nil { + return nil, errOpenAIWSConnClosed + } + readCtx := context.Background() + if timeout > 0 { + var cancel context.CancelFunc + readCtx, cancel = context.WithTimeout(readCtx, timeout) + defer cancel() + } + payload, err := c.ws.ReadMessage(readCtx) + if err != nil { + return nil, err + } + c.touch() + return payload, nil +} + +func (c *openAIWSConn) pingWithTimeout(timeout time.Duration) error { + if c == nil { + return errOpenAIWSConnClosed + } + select { + case <-c.closedCh: + return errOpenAIWSConnClosed + default: + } + + c.ioMu.Lock() + defer c.ioMu.Unlock() + if c.ws == nil { + return errOpenAIWSConnClosed + } + if timeout <= 0 { + timeout = openAIWSConnHealthCheckTO + } + pingCtx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + if err := c.ws.Ping(pingCtx); err != nil { + return err + } + return nil +} + +func (c *openAIWSConn) touch() { + if c == nil { + return + } + c.lastUsedNano.Store(time.Now().UnixNano()) +} + +func (c *openAIWSConn) createdAt() time.Time { + if c == nil { + return time.Time{} + } + nano := c.createdAtNano.Load() + if nano <= 0 { + return time.Time{} + } + return time.Unix(0, nano) +} + +func (c *openAIWSConn) lastUsedAt() time.Time { + if c == nil { + return time.Time{} + } + nano := c.lastUsedNano.Load() + if nano <= 0 { + return time.Time{} + } + return time.Unix(0, nano) +} + +func (c *openAIWSConn) idleDuration(now time.Time) time.Duration { + if c == nil { + return 0 + } + last := c.lastUsedAt() + if last.IsZero() { + return 0 + } + return now.Sub(last) 
+} + +func (c *openAIWSConn) age(now time.Time) time.Duration { + if c == nil { + return 0 + } + created := c.createdAt() + if created.IsZero() { + return 0 + } + return now.Sub(created) +} + +func (c *openAIWSConn) isLeased() bool { + if c == nil { + return false + } + return len(c.leaseCh) == 0 +} + +func (c *openAIWSConn) handshakeHeader(name string) string { + if c == nil || c.handshakeHeaders == nil { + return "" + } + return strings.TrimSpace(c.handshakeHeaders.Get(strings.TrimSpace(name))) +} + +func (c *openAIWSConn) isPrewarmed() bool { + if c == nil { + return false + } + return c.prewarmed.Load() +} + +func (c *openAIWSConn) markPrewarmed() { + if c == nil { + return + } + c.prewarmed.Store(true) +} + +type openAIWSAccountPool struct { + conns map[string]*openAIWSConn + creating int + lastAcquire *openAIWSAcquireRequest + prewarmActive bool +} + +type OpenAIWSPoolMetricsSnapshot struct { + AcquireTotal int64 + AcquireReuseTotal int64 + AcquireCreateTotal int64 + AcquireQueueWaitTotal int64 + AcquireQueueWaitMsTotal int64 + ScaleUpTotal int64 + ScaleDownTotal int64 +} + +type openAIWSPoolMetrics struct { + acquireTotal atomic.Int64 + acquireReuseTotal atomic.Int64 + acquireCreateTotal atomic.Int64 + acquireQueueWaitTotal atomic.Int64 + acquireQueueWaitMs atomic.Int64 + scaleUpTotal atomic.Int64 + scaleDownTotal atomic.Int64 +} + +type openAIWSConnPool struct { + cfg *config.Config + // 通过接口解耦底层 WS 客户端实现,默认使用 coder/websocket。 + clientDialer openAIWSClientDialer + + mu sync.Mutex + accounts map[int64]*openAIWSAccountPool + seq atomic.Uint64 + + metrics openAIWSPoolMetrics +} + +func newOpenAIWSConnPool(cfg *config.Config) *openAIWSConnPool { + return &openAIWSConnPool{ + cfg: cfg, + clientDialer: newDefaultOpenAIWSClientDialer(), + accounts: make(map[int64]*openAIWSAccountPool), + } +} + +func (p *openAIWSConnPool) SnapshotMetrics() OpenAIWSPoolMetricsSnapshot { + if p == nil { + return OpenAIWSPoolMetricsSnapshot{} + } + return OpenAIWSPoolMetricsSnapshot{ 
+ AcquireTotal: p.metrics.acquireTotal.Load(), + AcquireReuseTotal: p.metrics.acquireReuseTotal.Load(), + AcquireCreateTotal: p.metrics.acquireCreateTotal.Load(), + AcquireQueueWaitTotal: p.metrics.acquireQueueWaitTotal.Load(), + AcquireQueueWaitMsTotal: p.metrics.acquireQueueWaitMs.Load(), + ScaleUpTotal: p.metrics.scaleUpTotal.Load(), + ScaleDownTotal: p.metrics.scaleDownTotal.Load(), + } +} + +func (p *openAIWSConnPool) setClientDialerForTest(dialer openAIWSClientDialer) { + if p == nil || dialer == nil { + return + } + p.clientDialer = dialer +} + +func (p *openAIWSConnPool) Acquire(ctx context.Context, req openAIWSAcquireRequest) (*openAIWSConnLease, error) { + if p != nil { + p.metrics.acquireTotal.Add(1) + } + return p.acquire(ctx, cloneOpenAIWSAcquireRequest(req), 0) +} + +func (p *openAIWSConnPool) acquire(ctx context.Context, req openAIWSAcquireRequest, retry int) (*openAIWSConnLease, error) { + if p == nil || req.Account == nil || req.Account.ID <= 0 { + return nil, errors.New("invalid ws acquire request") + } + if stringsTrim(req.WSURL) == "" { + return nil, errors.New("ws url is empty") + } + + accountID := req.Account.ID + effectiveMaxConns := p.effectiveMaxConnsByAccount(req.Account) + var evicted []*openAIWSConn + + p.mu.Lock() + ap := p.ensureAccountPoolLocked(accountID) + ap.lastAcquire = cloneOpenAIWSAcquireRequestPtr(&req) + evicted = p.cleanupAccountLocked(ap, time.Now(), effectiveMaxConns) + + if preferred := stringsTrim(req.PreferredConnID); preferred != "" { + if conn, ok := ap.conns[preferred]; ok && conn.tryAcquire() { + p.mu.Unlock() + closeOpenAIWSConns(evicted) + if p.shouldHealthCheckConn(conn) { + if err := conn.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { + conn.close() + p.evictConn(accountID, conn.id) + if retry < 1 { + return p.acquire(ctx, req, retry+1) + } + return nil, err + } + } + lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: conn, reused: true} + p.metrics.acquireReuseTotal.Add(1) + 
p.ensureTargetIdleAsync(accountID) + return lease, nil + } + } + + for _, conn := range p.sortedConnsByLoadLocked(ap) { + if conn.tryAcquire() { + p.mu.Unlock() + closeOpenAIWSConns(evicted) + if p.shouldHealthCheckConn(conn) { + if err := conn.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { + conn.close() + p.evictConn(accountID, conn.id) + if retry < 1 { + return p.acquire(ctx, req, retry+1) + } + return nil, err + } + } + lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: conn, reused: true} + p.metrics.acquireReuseTotal.Add(1) + p.ensureTargetIdleAsync(accountID) + return lease, nil + } + } + + if len(ap.conns)+ap.creating < effectiveMaxConns { + ap.creating++ + p.mu.Unlock() + closeOpenAIWSConns(evicted) + + conn, dialErr := p.dialConn(ctx, req) + + p.mu.Lock() + ap = p.ensureAccountPoolLocked(accountID) + ap.creating-- + if dialErr != nil { + p.mu.Unlock() + return nil, dialErr + } + ap.conns[conn.id] = conn + p.mu.Unlock() + p.metrics.acquireCreateTotal.Add(1) + + if !conn.tryAcquire() { + if err := conn.acquire(ctx); err != nil { + conn.close() + p.evictConn(accountID, conn.id) + return nil, err + } + } + lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: conn} + p.ensureTargetIdleAsync(accountID) + return lease, nil + } + + target := p.pickLeastBusyConnLocked(ap, req.PreferredConnID) + if target == nil { + p.mu.Unlock() + closeOpenAIWSConns(evicted) + return nil, errOpenAIWSConnClosed + } + if int(target.waiters.Load()) >= p.queueLimitPerConn() { + p.mu.Unlock() + closeOpenAIWSConns(evicted) + return nil, errOpenAIWSConnQueueFull + } + target.waiters.Add(1) + p.mu.Unlock() + closeOpenAIWSConns(evicted) + defer target.waiters.Add(-1) + waitStart := time.Now() + p.metrics.acquireQueueWaitTotal.Add(1) + + if err := target.acquire(ctx); err != nil { + if errors.Is(err, errOpenAIWSConnClosed) && retry < 1 { + return p.acquire(ctx, req, retry+1) + } + return nil, err + } + if p.shouldHealthCheckConn(target) { + if err := 
target.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { + target.release() + target.close() + p.evictConn(accountID, target.id) + if retry < 1 { + return p.acquire(ctx, req, retry+1) + } + return nil, err + } + } + + queueWait := time.Since(waitStart) + p.metrics.acquireQueueWaitMs.Add(queueWait.Milliseconds()) + lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: target, queueWait: queueWait, reused: true} + p.metrics.acquireReuseTotal.Add(1) + p.ensureTargetIdleAsync(accountID) + return lease, nil +} + +func (p *openAIWSConnPool) ensureAccountPoolLocked(accountID int64) *openAIWSAccountPool { + ap, ok := p.accounts[accountID] + if ok { + return ap + } + ap = &openAIWSAccountPool{conns: make(map[string]*openAIWSConn)} + p.accounts[accountID] = ap + return ap +} + +func (p *openAIWSConnPool) cleanupAccountLocked(ap *openAIWSAccountPool, now time.Time, maxConns int) []*openAIWSConn { + if ap == nil { + return nil + } + maxAge := p.maxConnAge() + + evicted := make([]*openAIWSConn, 0) + for id, conn := range ap.conns { + select { + case <-conn.closedCh: + delete(ap.conns, id) + evicted = append(evicted, conn) + continue + default: + } + if maxAge > 0 && !conn.isLeased() && conn.age(now) > maxAge { + delete(ap.conns, id) + evicted = append(evicted, conn) + } + } + + if maxConns <= 0 { + maxConns = p.maxConnsHardCap() + } + maxIdle := p.maxIdlePerAccount() + if maxIdle < 0 || maxIdle > maxConns { + maxIdle = maxConns + } + if maxIdle >= 0 && len(ap.conns) > maxIdle { + idleConns := make([]*openAIWSConn, 0, len(ap.conns)) + for _, conn := range ap.conns { + if conn.isLeased() { + continue + } + idleConns = append(idleConns, conn) + } + sort.SliceStable(idleConns, func(i, j int) bool { + return idleConns[i].lastUsedAt().Before(idleConns[j].lastUsedAt()) + }) + redundant := len(ap.conns) - maxIdle + if redundant > len(idleConns) { + redundant = len(idleConns) + } + for i := 0; i < redundant; i++ { + conn := idleConns[i] + delete(ap.conns, conn.id) + 
evicted = append(evicted, conn) + } + if redundant > 0 { + p.metrics.scaleDownTotal.Add(int64(redundant)) + } + } + + return evicted +} + +func (p *openAIWSConnPool) sortedConnsByLoadLocked(ap *openAIWSAccountPool) []*openAIWSConn { + if ap == nil || len(ap.conns) == 0 { + return nil + } + conns := make([]*openAIWSConn, 0, len(ap.conns)) + for _, conn := range ap.conns { + conns = append(conns, conn) + } + sort.SliceStable(conns, func(i, j int) bool { + wi := conns[i].waiters.Load() + wj := conns[j].waiters.Load() + if wi != wj { + return wi < wj + } + return conns[i].lastUsedAt().Before(conns[j].lastUsedAt()) + }) + return conns +} + +func (p *openAIWSConnPool) pickLeastBusyConnLocked(ap *openAIWSAccountPool, preferredConnID string) *openAIWSConn { + if ap == nil || len(ap.conns) == 0 { + return nil + } + preferredConnID = stringsTrim(preferredConnID) + if preferredConnID != "" { + if conn, ok := ap.conns[preferredConnID]; ok { + return conn + } + } + conns := p.sortedConnsByLoadLocked(ap) + if len(conns) == 0 { + return nil + } + return conns[0] +} + +func accountPoolLoadLocked(ap *openAIWSAccountPool) (inflight int, waiters int) { + if ap == nil { + return 0, 0 + } + for _, conn := range ap.conns { + if conn == nil { + continue + } + if conn.isLeased() { + inflight++ + } + waiters += int(conn.waiters.Load()) + } + return inflight, waiters +} + +// AccountPoolLoad 返回指定账号连接池的并发与排队快照。 +func (p *openAIWSConnPool) AccountPoolLoad(accountID int64) (inflight int, waiters int, conns int) { + if p == nil || accountID <= 0 { + return 0, 0, 0 + } + p.mu.Lock() + defer p.mu.Unlock() + ap := p.accounts[accountID] + if ap == nil { + return 0, 0, 0 + } + inflight, waiters = accountPoolLoadLocked(ap) + return inflight, waiters, len(ap.conns) +} + +func (p *openAIWSConnPool) ensureTargetIdleAsync(accountID int64) { + if p == nil || accountID <= 0 { + return + } + + var req openAIWSAcquireRequest + need := 0 + p.mu.Lock() + ap, ok := p.accounts[accountID] + if !ok || ap == nil || 
ap.lastAcquire == nil { + p.mu.Unlock() + return + } + if ap.prewarmActive { + p.mu.Unlock() + return + } + effectiveMaxConns := p.maxConnsHardCap() + if ap.lastAcquire != nil && ap.lastAcquire.Account != nil { + effectiveMaxConns = p.effectiveMaxConnsByAccount(ap.lastAcquire.Account) + } + target := p.targetConnCountLocked(ap, effectiveMaxConns) + current := len(ap.conns) + ap.creating + if current >= target { + p.mu.Unlock() + return + } + need = target - current + if need <= 0 { + p.mu.Unlock() + return + } + req = cloneOpenAIWSAcquireRequest(*ap.lastAcquire) + ap.prewarmActive = true + ap.creating += need + p.metrics.scaleUpTotal.Add(int64(need)) + p.mu.Unlock() + + go p.prewarmConns(accountID, req, need) +} + +func (p *openAIWSConnPool) targetConnCountLocked(ap *openAIWSAccountPool, maxConns int) int { + if ap == nil { + return 0 + } + + if maxConns <= 0 { + return 0 + } + + minIdle := p.minIdlePerAccount() + if minIdle < 0 { + minIdle = 0 + } + if minIdle > maxConns { + minIdle = maxConns + } + + inflight, waiters := accountPoolLoadLocked(ap) + utilization := p.targetUtilization() + demand := inflight + waiters + if demand <= 0 { + return minIdle + } + + target := 1 + if demand > 1 { + target = int(math.Ceil(float64(demand) / utilization)) + } + if waiters > 0 && target < len(ap.conns)+1 { + target = len(ap.conns) + 1 + } + if target < minIdle { + target = minIdle + } + if target > maxConns { + target = maxConns + } + return target +} + +func (p *openAIWSConnPool) prewarmConns(accountID int64, req openAIWSAcquireRequest, total int) { + defer func() { + p.mu.Lock() + if ap, ok := p.accounts[accountID]; ok && ap != nil { + ap.prewarmActive = false + } + p.mu.Unlock() + }() + + for i := 0; i < total; i++ { + ctx, cancel := context.WithTimeout(context.Background(), p.dialTimeout()+openAIWSConnPrewarmExtraDelay) + conn, err := p.dialConn(ctx, req) + cancel() + + p.mu.Lock() + ap, ok := p.accounts[accountID] + if !ok || ap == nil { + p.mu.Unlock() + if conn != nil 
{ + conn.close() + } + return + } + if ap.creating > 0 { + ap.creating-- + } + if err != nil { + p.mu.Unlock() + continue + } + if len(ap.conns) >= p.effectiveMaxConnsByAccount(req.Account) { + p.mu.Unlock() + conn.close() + continue + } + ap.conns[conn.id] = conn + p.mu.Unlock() + } +} + +func (p *openAIWSConnPool) evictConn(accountID int64, connID string) { + if p == nil || accountID <= 0 || stringsTrim(connID) == "" { + return + } + var conn *openAIWSConn + p.mu.Lock() + if ap, ok := p.accounts[accountID]; ok { + if c, exists := ap.conns[connID]; exists { + conn = c + delete(ap.conns, connID) + } + } + p.mu.Unlock() + if conn != nil { + conn.close() + } +} + +func (p *openAIWSConnPool) dialConn(ctx context.Context, req openAIWSAcquireRequest) (*openAIWSConn, error) { + if p == nil || p.clientDialer == nil { + return nil, errors.New("openai ws client dialer is nil") + } + conn, status, handshakeHeaders, err := p.clientDialer.Dial(ctx, req.WSURL, req.Headers, req.ProxyURL) + if err != nil { + return nil, &openAIWSDialError{StatusCode: status, Err: err} + } + if conn == nil { + return nil, &openAIWSDialError{StatusCode: status, Err: errors.New("openai ws dialer returned nil connection")} + } + id := p.nextConnID(req.Account.ID) + return newOpenAIWSConn(id, req.Account.ID, conn, handshakeHeaders), nil +} + +func (p *openAIWSConnPool) nextConnID(accountID int64) string { + seq := p.seq.Add(1) + return fmt.Sprintf("oa_ws_%d_%d", accountID, seq) +} + +func (p *openAIWSConnPool) shouldHealthCheckConn(conn *openAIWSConn) bool { + if conn == nil { + return false + } + return conn.idleDuration(time.Now()) >= openAIWSConnHealthCheckIdle +} + +func (p *openAIWSConnPool) maxConnsHardCap() int { + if p != nil && p.cfg != nil && p.cfg.Gateway.OpenAIWS.MaxConnsPerAccount > 0 { + return p.cfg.Gateway.OpenAIWS.MaxConnsPerAccount + } + return 8 +} + +func (p *openAIWSConnPool) dynamicMaxConnsEnabled() bool { + if p != nil && p.cfg != nil { + return 
p.cfg.Gateway.OpenAIWS.DynamicMaxConnsByAccountConcurrencyEnabled + } + return false +} + +func (p *openAIWSConnPool) maxConnsFactorByAccount(account *Account) float64 { + if p == nil || p.cfg == nil || account == nil { + return 1.0 + } + switch account.Type { + case AccountTypeOAuth: + if p.cfg.Gateway.OpenAIWS.OAuthMaxConnsFactor > 0 { + return p.cfg.Gateway.OpenAIWS.OAuthMaxConnsFactor + } + case AccountTypeAPIKey: + if p.cfg.Gateway.OpenAIWS.APIKeyMaxConnsFactor > 0 { + return p.cfg.Gateway.OpenAIWS.APIKeyMaxConnsFactor + } + } + return 1.0 +} + +func (p *openAIWSConnPool) effectiveMaxConnsByAccount(account *Account) int { + hardCap := p.maxConnsHardCap() + if hardCap <= 0 { + return 0 + } + if account == nil || !p.dynamicMaxConnsEnabled() { + return hardCap + } + if account.Concurrency <= 0 { + // 0/-1 等“无限制”并发场景下,仍由全局硬上限兜底。 + return hardCap + } + factor := p.maxConnsFactorByAccount(account) + if factor <= 0 { + factor = 1.0 + } + effective := int(math.Ceil(float64(account.Concurrency) * factor)) + if effective < 1 { + effective = 1 + } + if effective > hardCap { + effective = hardCap + } + return effective +} + +func (p *openAIWSConnPool) minIdlePerAccount() int { + if p != nil && p.cfg != nil && p.cfg.Gateway.OpenAIWS.MinIdlePerAccount >= 0 { + return p.cfg.Gateway.OpenAIWS.MinIdlePerAccount + } + return 0 +} + +func (p *openAIWSConnPool) maxIdlePerAccount() int { + if p != nil && p.cfg != nil && p.cfg.Gateway.OpenAIWS.MaxIdlePerAccount >= 0 { + return p.cfg.Gateway.OpenAIWS.MaxIdlePerAccount + } + return 4 +} + +func (p *openAIWSConnPool) maxConnAge() time.Duration { + return openAIWSConnMaxAge +} + +func (p *openAIWSConnPool) queueLimitPerConn() int { + if p != nil && p.cfg != nil && p.cfg.Gateway.OpenAIWS.QueueLimitPerConn > 0 { + return p.cfg.Gateway.OpenAIWS.QueueLimitPerConn + } + return 256 +} + +func (p *openAIWSConnPool) targetUtilization() float64 { + if p != nil && p.cfg != nil { + ratio := p.cfg.Gateway.OpenAIWS.PoolTargetUtilization + if ratio > 
0 && ratio <= 1 { + return ratio + } + } + return 0.7 +} + +func (p *openAIWSConnPool) dialTimeout() time.Duration { + if p != nil && p.cfg != nil && p.cfg.Gateway.OpenAIWS.DialTimeoutSeconds > 0 { + return time.Duration(p.cfg.Gateway.OpenAIWS.DialTimeoutSeconds) * time.Second + } + return 10 * time.Second +} + +func cloneOpenAIWSAcquireRequest(req openAIWSAcquireRequest) openAIWSAcquireRequest { + copied := req + copied.Headers = cloneHeader(req.Headers) + copied.WSURL = stringsTrim(req.WSURL) + copied.ProxyURL = stringsTrim(req.ProxyURL) + copied.PreferredConnID = stringsTrim(req.PreferredConnID) + return copied +} + +func cloneOpenAIWSAcquireRequestPtr(req *openAIWSAcquireRequest) *openAIWSAcquireRequest { + if req == nil { + return nil + } + copied := cloneOpenAIWSAcquireRequest(*req) + return &copied +} + +func cloneHeader(src http.Header) http.Header { + if src == nil { + return nil + } + dst := make(http.Header, len(src)) + for k, vals := range src { + if len(vals) == 0 { + dst[k] = nil + continue + } + copied := make([]string, len(vals)) + copy(copied, vals) + dst[k] = copied + } + return dst +} + +func closeOpenAIWSConns(conns []*openAIWSConn) { + if len(conns) == 0 { + return + } + for _, conn := range conns { + if conn == nil { + continue + } + conn.close() + } +} + +func stringsTrim(value string) string { + return strings.TrimSpace(value) +} diff --git a/backend/internal/service/openai_ws_pool_test.go b/backend/internal/service/openai_ws_pool_test.go new file mode 100644 index 000000000..3f9e84d5e --- /dev/null +++ b/backend/internal/service/openai_ws_pool_test.go @@ -0,0 +1,285 @@ +package service + +import ( + "context" + "errors" + "net/http" + "sync" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +func TestOpenAIWSConnPool_CleanupStaleAndTrimIdle(t *testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + pool := newOpenAIWSConnPool(cfg) + + accountID 
:= int64(10)
	ap := pool.ensureAccountPoolLocked(accountID)

	// Seed three idle conns: one past max age, two within age but over the
	// max_idle=1 budget (older vs newer last-use).
	stale := newOpenAIWSConn("stale", accountID, nil, nil)
	stale.createdAtNano.Store(time.Now().Add(-2 * time.Hour).UnixNano())
	stale.lastUsedNano.Store(time.Now().Add(-2 * time.Hour).UnixNano())

	idleOld := newOpenAIWSConn("idle_old", accountID, nil, nil)
	idleOld.lastUsedNano.Store(time.Now().Add(-10 * time.Minute).UnixNano())

	idleNew := newOpenAIWSConn("idle_new", accountID, nil, nil)
	idleNew.lastUsedNano.Store(time.Now().Add(-1 * time.Minute).UnixNano())

	ap.conns[stale.id] = stale
	ap.conns[idleOld.id] = idleOld
	ap.conns[idleNew.id] = idleNew

	evicted := pool.cleanupAccountLocked(ap, time.Now(), pool.maxConnsHardCap())
	closeOpenAIWSConns(evicted)

	require.Nil(t, ap.conns["stale"], "stale connection should be rotated")
	require.Nil(t, ap.conns["idle_old"], "old idle should be trimmed by max_idle")
	require.NotNil(t, ap.conns["idle_new"], "newer idle should be kept")
}

// Verifies the autoscaler target: demand (inflight + waiters) divided by
// target_utilization expands to the hard cap, and dropping the load
// shrinks back to min_idle.
func TestOpenAIWSConnPool_TargetConnCountAdaptive(t *testing.T) {
	cfg := &config.Config{}
	cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 6
	cfg.Gateway.OpenAIWS.MinIdlePerAccount = 1
	cfg.Gateway.OpenAIWS.PoolTargetUtilization = 0.5

	pool := newOpenAIWSConnPool(cfg)
	ap := pool.ensureAccountPoolLocked(88)

	// Two leased conns, each with one queued waiter: demand = 4.
	conn1 := newOpenAIWSConn("c1", 88, nil, nil)
	conn2 := newOpenAIWSConn("c2", 88, nil, nil)
	require.True(t, conn1.tryAcquire())
	require.True(t, conn2.tryAcquire())
	conn1.waiters.Store(1)
	conn2.waiters.Store(1)

	ap.conns[conn1.id] = conn1
	ap.conns[conn2.id] = conn2

	target := pool.targetConnCountLocked(ap, pool.maxConnsHardCap())
	require.Equal(t, 6, target, "应按 inflight+waiters 与 target_utilization 自适应扩容到上限")

	// Release all load; target should collapse to min_idle.
	conn1.release()
	conn2.release()
	conn1.waiters.Store(0)
	conn2.waiters.Store(0)
	target = pool.targetConnCountLocked(ap, pool.maxConnsHardCap())
	require.Equal(t, 1, target, "低负载时应缩回到最小空闲连接")
}

// min_idle=0 with zero demand must allow scaling all the way to zero.
func TestOpenAIWSConnPool_TargetConnCountMinIdleZero(t *testing.T) {
	cfg := &config.Config{}
	cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 4
	cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0
	cfg.Gateway.OpenAIWS.PoolTargetUtilization = 0.8

	pool := newOpenAIWSConnPool(cfg)
	ap := pool.ensureAccountPoolLocked(66)

	target := pool.targetConnCountLocked(ap, pool.maxConnsHardCap())
	require.Equal(t, 0, target, "min_idle=0 且无负载时应允许缩容到 0")
}

// The async prewarm path should dial up to min_idle conns in the
// background and bump the scale-up metric.
func TestOpenAIWSConnPool_EnsureTargetIdleAsync(t *testing.T) {
	cfg := &config.Config{}
	cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 4
	cfg.Gateway.OpenAIWS.MinIdlePerAccount = 2
	cfg.Gateway.OpenAIWS.PoolTargetUtilization = 0.8
	cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 1

	pool := newOpenAIWSConnPool(cfg)
	pool.setClientDialerForTest(&openAIWSFakeDialer{})

	accountID := int64(77)
	account := &Account{ID: accountID, Platform: PlatformOpenAI, Type: AccountTypeAPIKey}
	// Prewarm reuses the last acquire request as the dial template.
	pool.mu.Lock()
	ap := pool.ensureAccountPoolLocked(accountID)
	ap.lastAcquire = &openAIWSAcquireRequest{
		Account: account,
		WSURL:   "wss://example.com/v1/responses",
	}
	pool.mu.Unlock()

	pool.ensureTargetIdleAsync(accountID)

	require.Eventually(t, func() bool {
		pool.mu.Lock()
		defer pool.mu.Unlock()
		return len(pool.accounts[accountID].conns) >= 2
	}, 2*time.Second, 20*time.Millisecond)

	metrics := pool.SnapshotMetrics()
	require.GreaterOrEqual(t, metrics.ScaleUpTotal, int64(2))
}

// With the single allowed conn leased, Acquire must queue, then succeed
// once the conn is released, recording a measurable queue wait.
func TestOpenAIWSConnPool_AcquireQueueWaitMetrics(t *testing.T) {
	cfg := &config.Config{}
	cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1
	cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0
	cfg.Gateway.OpenAIWS.QueueLimitPerConn = 4

	pool := newOpenAIWSConnPool(cfg)
	accountID := int64(99)
	account := &Account{ID: accountID, Platform: PlatformOpenAI, Type: AccountTypeAPIKey}
	conn := newOpenAIWSConn("busy", accountID, &openAIWSFakeConn{}, nil)
	require.True(t, conn.tryAcquire()) // 占用连接,触发后续排队

	pool.mu.Lock()
	ap := pool.ensureAccountPoolLocked(accountID)
	ap.conns[conn.id] = conn
	ap.lastAcquire = &openAIWSAcquireRequest{
		Account: account,
		WSURL:   "wss://example.com/v1/responses",
	}
	pool.mu.Unlock()

	// Release the busy conn after ~60ms so the queued Acquire completes.
	go func() {
		time.Sleep(60 * time.Millisecond)
		conn.release()
	}()

	lease, err := pool.Acquire(context.Background(), openAIWSAcquireRequest{
		Account: account,
		WSURL:   "wss://example.com/v1/responses",
	})
	require.NoError(t, err)
	require.NotNil(t, lease)
	require.True(t, lease.Reused())
	require.GreaterOrEqual(t, lease.QueueWaitDuration(), 50*time.Millisecond)
	lease.Release()

	metrics := pool.SnapshotMetrics()
	require.GreaterOrEqual(t, metrics.AcquireQueueWaitTotal, int64(1))
	require.Greater(t, metrics.AcquireQueueWaitMsTotal, int64(0))
}

// Covers the dynamic per-account conn cap: concurrency * type factor,
// clamped to [1, hard cap], with unlimited/missing accounts falling back
// to the hard cap.
func TestOpenAIWSConnPool_EffectiveMaxConnsByAccount(t *testing.T) {
	cfg := &config.Config{}
	cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 8
	cfg.Gateway.OpenAIWS.DynamicMaxConnsByAccountConcurrencyEnabled = true
	cfg.Gateway.OpenAIWS.OAuthMaxConnsFactor = 1.0
	cfg.Gateway.OpenAIWS.APIKeyMaxConnsFactor = 0.6

	pool := newOpenAIWSConnPool(cfg)

	oauthHigh := &Account{Platform: PlatformOpenAI, Type: AccountTypeOAuth, Concurrency: 10}
	require.Equal(t, 8, pool.effectiveMaxConnsByAccount(oauthHigh), "应受全局硬上限约束")

	oauthLow := &Account{Platform: PlatformOpenAI, Type: AccountTypeOAuth, Concurrency: 3}
	require.Equal(t, 3, pool.effectiveMaxConnsByAccount(oauthLow))

	apiKeyHigh := &Account{Platform: PlatformOpenAI, Type: AccountTypeAPIKey, Concurrency: 10}
	require.Equal(t, 6, pool.effectiveMaxConnsByAccount(apiKeyHigh), "API Key 应按系数缩放")

	apiKeyLow := &Account{Platform: PlatformOpenAI, Type: AccountTypeAPIKey, Concurrency: 1}
	require.Equal(t, 1, pool.effectiveMaxConnsByAccount(apiKeyLow), "最小值应保持为 1")

	unlimited := &Account{Platform: PlatformOpenAI, Type: AccountTypeOAuth, Concurrency: 0}
	require.Equal(t, 8, pool.effectiveMaxConnsByAccount(unlimited), "无限并发应回退到全局硬上限")

	require.Equal(t, 8, pool.effectiveMaxConnsByAccount(nil), "缺少账号上下文应回退到全局硬上限")
}

+func TestOpenAIWSConnPool_EffectiveMaxConnsDisabledFallbackHardCap(t *testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 8 + cfg.Gateway.OpenAIWS.DynamicMaxConnsByAccountConcurrencyEnabled = false + cfg.Gateway.OpenAIWS.OAuthMaxConnsFactor = 1.0 + cfg.Gateway.OpenAIWS.APIKeyMaxConnsFactor = 1.0 + + pool := newOpenAIWSConnPool(cfg) + account := &Account{Platform: PlatformOpenAI, Type: AccountTypeOAuth, Concurrency: 2} + require.Equal(t, 8, pool.effectiveMaxConnsByAccount(account), "关闭动态模式后应保持旧行为") +} + +type openAIWSFakeDialer struct{} + +func (d *openAIWSFakeDialer) Dial( + ctx context.Context, + wsURL string, + headers http.Header, + proxyURL string, +) (openAIWSClientConn, int, http.Header, error) { + _ = ctx + _ = wsURL + _ = headers + _ = proxyURL + return &openAIWSFakeConn{}, 0, nil, nil +} + +type openAIWSFakeConn struct { + mu sync.Mutex + closed bool + payload [][]byte +} + +func (c *openAIWSFakeConn) WriteJSON(ctx context.Context, value any) error { + _ = ctx + c.mu.Lock() + defer c.mu.Unlock() + if c.closed { + return errors.New("closed") + } + c.payload = append(c.payload, []byte("ok")) + _ = value + return nil +} + +func (c *openAIWSFakeConn) ReadMessage(ctx context.Context) ([]byte, error) { + _ = ctx + c.mu.Lock() + defer c.mu.Unlock() + if c.closed { + return nil, errors.New("closed") + } + return []byte(`{"type":"response.completed","response":{"id":"resp_fake"}}`), nil +} + +func (c *openAIWSFakeConn) Ping(ctx context.Context) error { + _ = ctx + return nil +} + +func (c *openAIWSFakeConn) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + c.closed = true + return nil +} + +type openAIWSNilConnDialer struct{} + +func (d *openAIWSNilConnDialer) Dial( + ctx context.Context, + wsURL string, + headers http.Header, + proxyURL string, +) (openAIWSClientConn, int, http.Header, error) { + _ = ctx + _ = wsURL + _ = headers + _ = proxyURL + return nil, 200, nil, nil +} + +func TestOpenAIWSConnPool_DialConnNilConnection(t 
*testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 2 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 1 + + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(&openAIWSNilConnDialer{}) + account := &Account{ID: 91, Platform: PlatformOpenAI, Type: AccountTypeAPIKey} + + _, err := pool.Acquire(context.Background(), openAIWSAcquireRequest{ + Account: account, + WSURL: "wss://example.com/v1/responses", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "nil connection") +} diff --git a/backend/internal/service/openai_ws_protocol_forward_test.go b/backend/internal/service/openai_ws_protocol_forward_test.go new file mode 100644 index 000000000..92a31b1cf --- /dev/null +++ b/backend/internal/service/openai_ws_protocol_forward_test.go @@ -0,0 +1,395 @@ +package service + +import ( + "context" + "io" + "net/http" + "net/http/httptest" + "strings" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" +) + +func TestOpenAIGatewayService_Forward_PreservePreviousResponseIDWhenWSEnabled(t *testing.T) { + gin.SetMode(gin.TestMode) + wsFallbackServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.NotFound(w, r) + })) + defer wsFallbackServer.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader( + `{"usage":{"input_tokens":1,"output_tokens":2,"input_tokens_details":{"cached_tokens":0}}}`, + )), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = 
true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + } + + account := &Account{ + ID: 1, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsFallbackServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_123","input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, "resp_123", gjson.GetBytes(upstream.lastBody, "previous_response_id").String()) +} + +func TestOpenAIGatewayService_Forward_RemovePreviousResponseIDWhenWSDisabled(t *testing.T) { + gin.SetMode(gin.TestMode) + wsFallbackServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.NotFound(w, r) + })) + defer wsFallbackServer.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader( + `{"usage":{"input_tokens":1,"output_tokens":2,"input_tokens_details":{"cached_tokens":0}}}`, + )), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = false + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 
= true + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + } + + account := &Account{ + ID: 1, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsFallbackServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_123","input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.False(t, gjson.GetBytes(upstream.lastBody, "previous_response_id").Exists()) +} + +func TestOpenAIGatewayService_Forward_WSv2Dial426FallbackHTTP(t *testing.T) { + gin.SetMode(gin.TestMode) + ws426Server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUpgradeRequired) + _, _ = w.Write([]byte(`upgrade required`)) + })) + defer ws426Server.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader( + `{"usage":{"input_tokens":8,"output_tokens":9,"input_tokens_details":{"cached_tokens":1}}}`, + )), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.FallbackCooldownSeconds = 1 + 
+ svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + } + + account := &Account{ + ID: 12, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": ws426Server.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_426","input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, "resp_426", gjson.GetBytes(upstream.lastBody, "previous_response_id").String(), "426 回退 HTTP 后仍应保留 previous_response_id") +} + +func TestOpenAIGatewayService_Forward_WSv2FallbackCoolingSkipWS(t *testing.T) { + gin.SetMode(gin.TestMode) + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + http.NotFound(w, r) + })) + defer wsServer.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader( + `{"usage":{"input_tokens":2,"output_tokens":3,"input_tokens_details":{"cached_tokens":0}}}`, + )), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.FallbackCooldownSeconds = 30 + + svc := 
&OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + } + + account := &Account{ + ID: 21, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + svc.markOpenAIWSFallbackCooling(account.ID, "upgrade_required") + body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_cooling","input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, "resp_cooling", gjson.GetBytes(upstream.lastBody, "previous_response_id").String()) + + v, ok := c.Get("openai_ws_fallback_cooling") + require.True(t, ok) + require.Equal(t, true, v) +} + +func TestOpenAIGatewayService_Forward_ReturnErrorWhenOnlyWSv1Enabled(t *testing.T) { + gin.SetMode(gin.TestMode) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader( + `{"usage":{"input_tokens":1,"output_tokens":2,"input_tokens_details":{"cached_tokens":0}}}`, + )), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsockets = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = false + + svc := &OpenAIGatewayService{ + cfg: cfg, + 
httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + } + + account := &Account{ + ID: 31, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": "https://api.openai.com/v1/responses", + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_v1","input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "ws v1") + require.Equal(t, http.StatusBadRequest, rec.Code) + require.Contains(t, rec.Body.String(), "WSv1") + require.Nil(t, upstream.lastReq, "WSv1 不支持时不应触发 HTTP 上游请求") +} + +func TestNewOpenAIGatewayService_InitializesOpenAIWSResolver(t *testing.T) { + cfg := &config.Config{} + svc := NewOpenAIGatewayService( + nil, + nil, + nil, + nil, + nil, + cfg, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + nil, + ) + + decision := svc.getOpenAIWSProtocolResolver().Resolve(nil) + require.Equal(t, OpenAIUpstreamTransportHTTPSSE, decision.Transport) + require.Equal(t, "account_missing", decision.Reason) +} + +func TestOpenAIGatewayService_Forward_WSv2FallbackWhenResponseAlreadyWrittenReturnsWSError(t *testing.T) { + gin.SetMode(gin.TestMode) + ws426Server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusUpgradeRequired) + _, _ = w.Write([]byte(`upgrade required`)) + })) + defer ws426Server.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + c.String(http.StatusAccepted, "already-written") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + 
StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader(`{"usage":{"input_tokens":1,"output_tokens":1}}`)), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.FallbackCooldownSeconds = 1 + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + } + + account := &Account{ + ID: 41, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": ws426Server.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "ws fallback") + require.Nil(t, upstream.lastReq, "已写下游响应时,不应再回退 HTTP") +} diff --git a/backend/internal/service/openai_ws_protocol_resolver.go b/backend/internal/service/openai_ws_protocol_resolver.go new file mode 100644 index 000000000..915adc750 --- /dev/null +++ b/backend/internal/service/openai_ws_protocol_resolver.go @@ -0,0 +1,88 @@ +package service + +import "github.com/Wei-Shaw/sub2api/internal/config" + +// OpenAIUpstreamTransport 表示 OpenAI 上游传输协议。 +type OpenAIUpstreamTransport string + +const ( + OpenAIUpstreamTransportHTTPSSE OpenAIUpstreamTransport = "http_sse" + OpenAIUpstreamTransportResponsesWebsocket OpenAIUpstreamTransport = "responses_websockets" + OpenAIUpstreamTransportResponsesWebsocketV2 OpenAIUpstreamTransport = 
"responses_websockets_v2" +) + +// OpenAIWSProtocolDecision 表示协议决策结果。 +type OpenAIWSProtocolDecision struct { + Transport OpenAIUpstreamTransport + Reason string +} + +// OpenAIWSProtocolResolver 定义 OpenAI 上游协议决策。 +type OpenAIWSProtocolResolver interface { + Resolve(account *Account) OpenAIWSProtocolDecision +} + +type defaultOpenAIWSProtocolResolver struct { + cfg *config.Config +} + +// NewOpenAIWSProtocolResolver 创建默认协议决策器。 +func NewOpenAIWSProtocolResolver(cfg *config.Config) OpenAIWSProtocolResolver { + return &defaultOpenAIWSProtocolResolver{cfg: cfg} +} + +func (r *defaultOpenAIWSProtocolResolver) Resolve(account *Account) OpenAIWSProtocolDecision { + if account == nil { + return openAIWSHTTPDecision("account_missing") + } + if !account.IsOpenAI() { + return openAIWSHTTPDecision("platform_not_openai") + } + if account.IsOpenAIPassthroughEnabled() { + // 透传优先,必须保持原线路。 + return openAIWSHTTPDecision("passthrough_priority") + } + if account.IsOpenAIWSForceHTTPEnabled() { + return openAIWSHTTPDecision("account_force_http") + } + if r == nil || r.cfg == nil { + return openAIWSHTTPDecision("config_missing") + } + + wsCfg := r.cfg.Gateway.OpenAIWS + if wsCfg.ForceHTTP { + return openAIWSHTTPDecision("global_force_http") + } + if !wsCfg.Enabled { + return openAIWSHTTPDecision("global_disabled") + } + if account.IsOpenAIOAuth() && !wsCfg.OAuthEnabled { + return openAIWSHTTPDecision("oauth_disabled") + } + if account.IsOpenAIApiKey() && !wsCfg.APIKeyEnabled { + return openAIWSHTTPDecision("apikey_disabled") + } + if !account.IsOpenAIResponsesWebSocketV2Enabled() { + return openAIWSHTTPDecision("account_disabled") + } + if wsCfg.ResponsesWebsocketsV2 { + return OpenAIWSProtocolDecision{ + Transport: OpenAIUpstreamTransportResponsesWebsocketV2, + Reason: "ws_v2_enabled", + } + } + if wsCfg.ResponsesWebsockets { + return OpenAIWSProtocolDecision{ + Transport: OpenAIUpstreamTransportResponsesWebsocket, + Reason: "ws_v1_enabled", + } + } + return 
openAIWSHTTPDecision("feature_disabled") +} + +func openAIWSHTTPDecision(reason string) OpenAIWSProtocolDecision { + return OpenAIWSProtocolDecision{ + Transport: OpenAIUpstreamTransportHTTPSSE, + Reason: reason, + } +} diff --git a/backend/internal/service/openai_ws_protocol_resolver_test.go b/backend/internal/service/openai_ws_protocol_resolver_test.go new file mode 100644 index 000000000..7b252f60c --- /dev/null +++ b/backend/internal/service/openai_ws_protocol_resolver_test.go @@ -0,0 +1,109 @@ +package service + +import ( + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +func TestOpenAIWSProtocolResolver_Resolve(t *testing.T) { + baseCfg := &config.Config{} + baseCfg.Gateway.OpenAIWS.Enabled = true + baseCfg.Gateway.OpenAIWS.OAuthEnabled = true + baseCfg.Gateway.OpenAIWS.APIKeyEnabled = true + baseCfg.Gateway.OpenAIWS.ResponsesWebsockets = false + baseCfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + + openAIOAuthEnabled := &Account{ + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Extra: map[string]any{ + "openai_oauth_responses_websockets_v2_enabled": true, + }, + } + + t.Run("v2优先", func(t *testing.T) { + decision := NewOpenAIWSProtocolResolver(baseCfg).Resolve(openAIOAuthEnabled) + require.Equal(t, OpenAIUpstreamTransportResponsesWebsocketV2, decision.Transport) + require.Equal(t, "ws_v2_enabled", decision.Reason) + }) + + t.Run("v2关闭时回退v1", func(t *testing.T) { + cfg := *baseCfg + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = false + cfg.Gateway.OpenAIWS.ResponsesWebsockets = true + + decision := NewOpenAIWSProtocolResolver(&cfg).Resolve(openAIOAuthEnabled) + require.Equal(t, OpenAIUpstreamTransportResponsesWebsocket, decision.Transport) + require.Equal(t, "ws_v1_enabled", decision.Reason) + }) + + t.Run("透传优先强制HTTP", func(t *testing.T) { + account := *openAIOAuthEnabled + account.Extra = map[string]any{ + "openai_oauth_responses_websockets_v2_enabled": true, + "openai_passthrough": true, + 
} + decision := NewOpenAIWSProtocolResolver(baseCfg).Resolve(&account) + require.Equal(t, OpenAIUpstreamTransportHTTPSSE, decision.Transport) + require.Equal(t, "passthrough_priority", decision.Reason) + }) + + t.Run("账号级强制HTTP", func(t *testing.T) { + account := *openAIOAuthEnabled + account.Extra = map[string]any{ + "openai_oauth_responses_websockets_v2_enabled": true, + "openai_ws_force_http": true, + } + decision := NewOpenAIWSProtocolResolver(baseCfg).Resolve(&account) + require.Equal(t, OpenAIUpstreamTransportHTTPSSE, decision.Transport) + require.Equal(t, "account_force_http", decision.Reason) + }) + + t.Run("全局关闭保持HTTP", func(t *testing.T) { + cfg := *baseCfg + cfg.Gateway.OpenAIWS.Enabled = false + decision := NewOpenAIWSProtocolResolver(&cfg).Resolve(openAIOAuthEnabled) + require.Equal(t, OpenAIUpstreamTransportHTTPSSE, decision.Transport) + require.Equal(t, "global_disabled", decision.Reason) + }) + + t.Run("账号开关关闭保持HTTP", func(t *testing.T) { + account := *openAIOAuthEnabled + account.Extra = map[string]any{ + "openai_oauth_responses_websockets_v2_enabled": false, + } + decision := NewOpenAIWSProtocolResolver(baseCfg).Resolve(&account) + require.Equal(t, OpenAIUpstreamTransportHTTPSSE, decision.Transport) + require.Equal(t, "account_disabled", decision.Reason) + }) + + t.Run("OAuth账号不会读取API Key专用开关", func(t *testing.T) { + account := *openAIOAuthEnabled + account.Extra = map[string]any{ + "openai_apikey_responses_websockets_v2_enabled": true, + } + decision := NewOpenAIWSProtocolResolver(baseCfg).Resolve(&account) + require.Equal(t, OpenAIUpstreamTransportHTTPSSE, decision.Transport) + require.Equal(t, "account_disabled", decision.Reason) + }) + + t.Run("兼容旧键openai_ws_enabled", func(t *testing.T) { + account := *openAIOAuthEnabled + account.Extra = map[string]any{ + "openai_ws_enabled": true, + } + decision := NewOpenAIWSProtocolResolver(baseCfg).Resolve(&account) + require.Equal(t, OpenAIUpstreamTransportResponsesWebsocketV2, decision.Transport) + 
require.Equal(t, "ws_v2_enabled", decision.Reason) + }) + + t.Run("按账号类型开关控制", func(t *testing.T) { + cfg := *baseCfg + cfg.Gateway.OpenAIWS.OAuthEnabled = false + decision := NewOpenAIWSProtocolResolver(&cfg).Resolve(openAIOAuthEnabled) + require.Equal(t, OpenAIUpstreamTransportHTTPSSE, decision.Transport) + require.Equal(t, "oauth_disabled", decision.Reason) + }) +} diff --git a/backend/internal/service/openai_ws_state_store.go b/backend/internal/service/openai_ws_state_store.go new file mode 100644 index 000000000..8758e9ff2 --- /dev/null +++ b/backend/internal/service/openai_ws_state_store.go @@ -0,0 +1,285 @@ +package service + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + "strings" + "sync" + "sync/atomic" + "time" +) + +const ( + openAIWSResponseAccountCachePrefix = "openai:response:" + openAIWSStateStoreCleanupInterval = time.Minute +) + +type openAIWSAccountBinding struct { + accountID int64 + expiresAt time.Time +} + +type openAIWSConnBinding struct { + connID string + expiresAt time.Time +} + +type openAIWSTurnStateBinding struct { + turnState string + expiresAt time.Time +} + +// OpenAIWSStateStore 管理 WSv2 的粘连状态。 +// - response_id -> account_id 用于续链路由 +// - response_id -> conn_id 用于连接内上下文复用 +// +// response_id -> account_id 优先走 GatewayCache(Redis),同时维护本地热缓存。 +// response_id -> conn_id 仅在本进程内有效。 +type OpenAIWSStateStore interface { + BindResponseAccount(ctx context.Context, groupID int64, responseID string, accountID int64, ttl time.Duration) error + GetResponseAccount(ctx context.Context, groupID int64, responseID string) (int64, error) + DeleteResponseAccount(ctx context.Context, groupID int64, responseID string) error + + BindResponseConn(responseID, connID string, ttl time.Duration) + GetResponseConn(responseID string) (string, bool) + DeleteResponseConn(responseID string) + + BindSessionTurnState(groupID int64, sessionHash, turnState string, ttl time.Duration) + GetSessionTurnState(groupID int64, sessionHash string) (string, 
bool) + DeleteSessionTurnState(groupID int64, sessionHash string) +} + +type defaultOpenAIWSStateStore struct { + cache GatewayCache + + mu sync.RWMutex + responseToAccount map[string]openAIWSAccountBinding + responseToConn map[string]openAIWSConnBinding + sessionToTurnState map[string]openAIWSTurnStateBinding + + lastCleanupUnixNano atomic.Int64 +} + +// NewOpenAIWSStateStore 创建默认 WS 状态存储。 +func NewOpenAIWSStateStore(cache GatewayCache) OpenAIWSStateStore { + store := &defaultOpenAIWSStateStore{ + cache: cache, + responseToAccount: make(map[string]openAIWSAccountBinding, 256), + responseToConn: make(map[string]openAIWSConnBinding, 256), + sessionToTurnState: make(map[string]openAIWSTurnStateBinding, 256), + } + store.lastCleanupUnixNano.Store(time.Now().UnixNano()) + return store +} + +func (s *defaultOpenAIWSStateStore) BindResponseAccount(ctx context.Context, groupID int64, responseID string, accountID int64, ttl time.Duration) error { + id := normalizeOpenAIWSResponseID(responseID) + if id == "" || accountID <= 0 { + return nil + } + ttl = normalizeOpenAIWSTTL(ttl) + s.maybeCleanup() + + expiresAt := time.Now().Add(ttl) + s.mu.Lock() + s.responseToAccount[id] = openAIWSAccountBinding{accountID: accountID, expiresAt: expiresAt} + s.mu.Unlock() + + if s.cache == nil { + return nil + } + cacheKey := openAIWSResponseAccountCacheKey(id) + return s.cache.SetSessionAccountID(ctx, groupID, cacheKey, accountID, ttl) +} + +func (s *defaultOpenAIWSStateStore) GetResponseAccount(ctx context.Context, groupID int64, responseID string) (int64, error) { + id := normalizeOpenAIWSResponseID(responseID) + if id == "" { + return 0, nil + } + s.maybeCleanup() + + now := time.Now() + s.mu.RLock() + if binding, ok := s.responseToAccount[id]; ok { + if now.Before(binding.expiresAt) { + accountID := binding.accountID + s.mu.RUnlock() + return accountID, nil + } + } + s.mu.RUnlock() + + if s.cache == nil { + return 0, nil + } + + cacheKey := openAIWSResponseAccountCacheKey(id) + 
accountID, err := s.cache.GetSessionAccountID(ctx, groupID, cacheKey) + if err != nil || accountID <= 0 { + // 缓存读取失败不阻断主流程,按未命中降级。 + return 0, nil + } + return accountID, nil +} + +func (s *defaultOpenAIWSStateStore) DeleteResponseAccount(ctx context.Context, groupID int64, responseID string) error { + id := normalizeOpenAIWSResponseID(responseID) + if id == "" { + return nil + } + s.mu.Lock() + delete(s.responseToAccount, id) + s.mu.Unlock() + + if s.cache == nil { + return nil + } + return s.cache.DeleteSessionAccountID(ctx, groupID, openAIWSResponseAccountCacheKey(id)) +} + +func (s *defaultOpenAIWSStateStore) BindResponseConn(responseID, connID string, ttl time.Duration) { + id := normalizeOpenAIWSResponseID(responseID) + conn := strings.TrimSpace(connID) + if id == "" || conn == "" { + return + } + ttl = normalizeOpenAIWSTTL(ttl) + s.maybeCleanup() + + s.mu.Lock() + s.responseToConn[id] = openAIWSConnBinding{ + connID: conn, + expiresAt: time.Now().Add(ttl), + } + s.mu.Unlock() +} + +func (s *defaultOpenAIWSStateStore) GetResponseConn(responseID string) (string, bool) { + id := normalizeOpenAIWSResponseID(responseID) + if id == "" { + return "", false + } + s.maybeCleanup() + + now := time.Now() + s.mu.RLock() + binding, ok := s.responseToConn[id] + s.mu.RUnlock() + if !ok || now.After(binding.expiresAt) || strings.TrimSpace(binding.connID) == "" { + return "", false + } + return binding.connID, true +} + +func (s *defaultOpenAIWSStateStore) DeleteResponseConn(responseID string) { + id := normalizeOpenAIWSResponseID(responseID) + if id == "" { + return + } + s.mu.Lock() + delete(s.responseToConn, id) + s.mu.Unlock() +} + +func (s *defaultOpenAIWSStateStore) BindSessionTurnState(groupID int64, sessionHash, turnState string, ttl time.Duration) { + key := openAIWSSessionTurnStateKey(groupID, sessionHash) + state := strings.TrimSpace(turnState) + if key == "" || state == "" { + return + } + ttl = normalizeOpenAIWSTTL(ttl) + s.maybeCleanup() + + s.mu.Lock() + 
s.sessionToTurnState[key] = openAIWSTurnStateBinding{ + turnState: state, + expiresAt: time.Now().Add(ttl), + } + s.mu.Unlock() +} + +func (s *defaultOpenAIWSStateStore) GetSessionTurnState(groupID int64, sessionHash string) (string, bool) { + key := openAIWSSessionTurnStateKey(groupID, sessionHash) + if key == "" { + return "", false + } + s.maybeCleanup() + + now := time.Now() + s.mu.RLock() + binding, ok := s.sessionToTurnState[key] + s.mu.RUnlock() + if !ok || now.After(binding.expiresAt) || strings.TrimSpace(binding.turnState) == "" { + return "", false + } + return binding.turnState, true +} + +func (s *defaultOpenAIWSStateStore) DeleteSessionTurnState(groupID int64, sessionHash string) { + key := openAIWSSessionTurnStateKey(groupID, sessionHash) + if key == "" { + return + } + s.mu.Lock() + delete(s.sessionToTurnState, key) + s.mu.Unlock() +} + +func (s *defaultOpenAIWSStateStore) maybeCleanup() { + if s == nil { + return + } + now := time.Now() + last := time.Unix(0, s.lastCleanupUnixNano.Load()) + if now.Sub(last) < openAIWSStateStoreCleanupInterval { + return + } + if !s.lastCleanupUnixNano.CompareAndSwap(last.UnixNano(), now.UnixNano()) { + return + } + + s.mu.Lock() + defer s.mu.Unlock() + for key, binding := range s.responseToAccount { + if now.After(binding.expiresAt) { + delete(s.responseToAccount, key) + } + } + for key, binding := range s.responseToConn { + if now.After(binding.expiresAt) { + delete(s.responseToConn, key) + } + } + for key, binding := range s.sessionToTurnState { + if now.After(binding.expiresAt) { + delete(s.sessionToTurnState, key) + } + } +} + +func normalizeOpenAIWSResponseID(responseID string) string { + return strings.TrimSpace(responseID) +} + +func openAIWSResponseAccountCacheKey(responseID string) string { + sum := sha256.Sum256([]byte(responseID)) + return openAIWSResponseAccountCachePrefix + hex.EncodeToString(sum[:]) +} + +func normalizeOpenAIWSTTL(ttl time.Duration) time.Duration { + if ttl <= 0 { + return time.Hour + 
} + return ttl +} + +func openAIWSSessionTurnStateKey(groupID int64, sessionHash string) string { + hash := strings.TrimSpace(sessionHash) + if hash == "" { + return "" + } + return fmt.Sprintf("%d:%s", groupID, hash) +} diff --git a/backend/internal/service/openai_ws_state_store_test.go b/backend/internal/service/openai_ws_state_store_test.go new file mode 100644 index 000000000..5e24310b8 --- /dev/null +++ b/backend/internal/service/openai_ws_state_store_test.go @@ -0,0 +1,76 @@ +package service + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestOpenAIWSStateStore_BindGetDeleteResponseAccount(t *testing.T) { + cache := &stubGatewayCache{} + store := NewOpenAIWSStateStore(cache) + ctx := context.Background() + groupID := int64(7) + + require.NoError(t, store.BindResponseAccount(ctx, groupID, "resp_abc", 101, time.Minute)) + + accountID, err := store.GetResponseAccount(ctx, groupID, "resp_abc") + require.NoError(t, err) + require.Equal(t, int64(101), accountID) + + require.NoError(t, store.DeleteResponseAccount(ctx, groupID, "resp_abc")) + accountID, err = store.GetResponseAccount(ctx, groupID, "resp_abc") + require.NoError(t, err) + require.Zero(t, accountID) +} + +func TestOpenAIWSStateStore_ResponseConnTTL(t *testing.T) { + store := NewOpenAIWSStateStore(nil) + store.BindResponseConn("resp_conn", "conn_1", 30*time.Millisecond) + + connID, ok := store.GetResponseConn("resp_conn") + require.True(t, ok) + require.Equal(t, "conn_1", connID) + + time.Sleep(60 * time.Millisecond) + _, ok = store.GetResponseConn("resp_conn") + require.False(t, ok) +} + +func TestOpenAIWSStateStore_SessionTurnStateTTL(t *testing.T) { + store := NewOpenAIWSStateStore(nil) + store.BindSessionTurnState(9, "session_hash_1", "turn_state_1", 30*time.Millisecond) + + state, ok := store.GetSessionTurnState(9, "session_hash_1") + require.True(t, ok) + require.Equal(t, "turn_state_1", state) + + // group 隔离 + _, ok = store.GetSessionTurnState(10, 
"session_hash_1") + require.False(t, ok) + + time.Sleep(60 * time.Millisecond) + _, ok = store.GetSessionTurnState(9, "session_hash_1") + require.False(t, ok) +} + +func TestOpenAIWSStateStore_GetResponseAccount_NoStaleAfterCacheMiss(t *testing.T) { + cache := &stubGatewayCache{sessionBindings: map[string]int64{}} + store := NewOpenAIWSStateStore(cache) + ctx := context.Background() + groupID := int64(17) + responseID := "resp_cache_stale" + cacheKey := openAIWSResponseAccountCacheKey(responseID) + + cache.sessionBindings[cacheKey] = 501 + accountID, err := store.GetResponseAccount(ctx, groupID, responseID) + require.NoError(t, err) + require.Equal(t, int64(501), accountID) + + delete(cache.sessionBindings, cacheKey) + accountID, err = store.GetResponseAccount(ctx, groupID, responseID) + require.NoError(t, err) + require.Zero(t, accountID, "上游缓存失效后不应继续命中本地陈旧映射") +} diff --git a/backend/internal/service/ops_upstream_context.go b/backend/internal/service/ops_upstream_context.go index 23c154ce0..c05945786 100644 --- a/backend/internal/service/ops_upstream_context.go +++ b/backend/internal/service/ops_upstream_context.go @@ -27,6 +27,10 @@ const ( OpsUpstreamLatencyMsKey = "ops_upstream_latency_ms" OpsResponseLatencyMsKey = "ops_response_latency_ms" OpsTimeToFirstTokenMsKey = "ops_time_to_first_token_ms" + // OpenAI WS 关键观测字段 + OpsOpenAIWSQueueWaitMsKey = "ops_openai_ws_queue_wait_ms" + OpsOpenAIWSConnReusedKey = "ops_openai_ws_conn_reused" + OpsOpenAIWSConnIDKey = "ops_openai_ws_conn_id" // OpsSkipPassthroughKey 由 applyErrorPassthroughRule 在命中 skip_monitoring=true 的规则时设置。 // ops_error_logger 中间件检查此 key,为 true 时跳过错误记录。 diff --git a/backup/internal/store/entstore/store.go b/backup/internal/store/entstore/store.go index b276e9279..54f8c40fb 100644 --- a/backup/internal/store/entstore/store.go +++ b/backup/internal/store/entstore/store.go @@ -896,15 +896,28 @@ func (s *Store) CreateBackupJob(ctx context.Context, input CreateBackupJobInput) input.S3ProfileID = 
strings.TrimSpace(input.S3ProfileID) input.PostgresID = strings.TrimSpace(input.PostgresID) input.RedisID = strings.TrimSpace(input.RedisID) + needsPostgres := backupTypeNeedsPostgres(input.BackupType) + needsRedis := backupTypeNeedsRedis(input.BackupType) - if backupTypeNeedsPostgres(input.BackupType) { + // 仅保留本次备份类型真正需要的来源配置,避免写入无关 profile 造成“被占用”误判。 + if !needsPostgres { + input.PostgresID = "" + } + if !needsRedis { + input.RedisID = "" + } + if !input.UploadToS3 { + input.S3ProfileID = "" + } + + if needsPostgres { resolvedID, resolveErr := s.resolveSourceProfileID(ctx, backupsourceconfig.SourceTypePostgres.String(), input.PostgresID) if resolveErr != nil { return nil, false, resolveErr } input.PostgresID = resolvedID } - if backupTypeNeedsRedis(input.BackupType) { + if needsRedis { resolvedID, resolveErr := s.resolveSourceProfileID(ctx, backupsourceconfig.SourceTypeRedis.String(), input.RedisID) if resolveErr != nil { return nil, false, resolveErr @@ -974,29 +987,44 @@ func (s *Store) CreateBackupJob(ctx context.Context, input CreateBackupJobInput) } func (s *Store) AcquireNextQueuedJob(ctx context.Context) (*ent.BackupJob, error) { - job, err := s.client.BackupJob.Query(). - Where(backupjob.StatusEQ(backupjob.StatusQueued)). - Order(ent.Asc(backupjob.FieldCreatedAt), ent.Asc(backupjob.FieldID)). - First(ctx) - if err != nil { - return nil, err - } + for { + job, err := s.client.BackupJob.Query(). + Where(backupjob.StatusEQ(backupjob.StatusQueued)). + Order(ent.Asc(backupjob.FieldCreatedAt), ent.Asc(backupjob.FieldID)). + First(ctx) + if err != nil { + return nil, err + } - now := time.Now() - updated, err := s.client.BackupJob.UpdateOneID(job.ID). - SetStatus(backupjob.StatusRunning). - SetStartedAt(now). - ClearFinishedAt(). - ClearErrorMessage(). - Save(ctx) - if err != nil { - return nil, err - } + now := time.Now() + affected, err := s.client.BackupJob.Update(). + Where( + backupjob.IDEQ(job.ID), + backupjob.StatusEQ(backupjob.StatusQueued), + ). 
+ SetStatus(backupjob.StatusRunning). + SetStartedAt(now). + ClearFinishedAt(). + ClearErrorMessage(). + Save(ctx) + if err != nil { + return nil, err + } + if affected == 0 { + // 并发下被其他 worker 抢占时继续重试下一条 queued 任务。 + continue + } - if err := s.appendJobEventByEntityID(ctx, updated.ID, backupjobevent.LevelInfo, "state_change", "job started", ""); err != nil { - return nil, err + updated, err := s.client.BackupJob.Query().Where(backupjob.IDEQ(job.ID)).First(ctx) + if err != nil { + return nil, err + } + + if err := s.appendJobEventByEntityID(ctx, updated.ID, backupjobevent.LevelInfo, "state_change", "job started", ""); err != nil { + return nil, err + } + return updated, nil } - return updated, nil } func (s *Store) FinishBackupJob(ctx context.Context, input FinishBackupJobInput) (*ent.BackupJob, error) { diff --git a/backup/internal/store/entstore/store_test.go b/backup/internal/store/entstore/store_test.go index a44c32333..e676b0dea 100644 --- a/backup/internal/store/entstore/store_test.go +++ b/backup/internal/store/entstore/store_test.go @@ -319,6 +319,49 @@ func TestStore_CreateBackupJob_WithSelectedSourceProfiles(t *testing.T) { require.Equal(t, "redis-custom", job.RedisProfileID) } +func TestStore_CreateBackupJob_IgnoreUnusedProfilesAndS3(t *testing.T) { + store := openTestStore(t) + ctx := context.Background() + + pgJob, created, err := store.CreateBackupJob(ctx, CreateBackupJobInput{ + BackupType: backupjob.BackupTypePostgres.String(), + TriggeredBy: "admin:11", + RedisID: "redis-should-be-ignored", + }) + require.NoError(t, err) + require.True(t, created) + require.Empty(t, pgJob.RedisProfileID) + require.NotEmpty(t, pgJob.PostgresProfileID) + + redisJob, created, err := store.CreateBackupJob(ctx, CreateBackupJobInput{ + BackupType: backupjob.BackupTypeRedis.String(), + TriggeredBy: "admin:12", + PostgresID: "postgres-should-be-ignored", + }) + require.NoError(t, err) + require.True(t, created) + require.Empty(t, redisJob.PostgresProfileID) + 
require.NotEmpty(t, redisJob.RedisProfileID) + + noS3Job, created, err := store.CreateBackupJob(ctx, CreateBackupJobInput{ + BackupType: backupjob.BackupTypePostgres.String(), + TriggeredBy: "admin:13", + UploadToS3: false, + S3ProfileID: "missing-profile", + }) + require.NoError(t, err) + require.True(t, created) + require.Empty(t, noS3Job.S3ProfileID) + + _, _, err = store.CreateBackupJob(ctx, CreateBackupJobInput{ + BackupType: backupjob.BackupTypePostgres.String(), + TriggeredBy: "admin:14", + UploadToS3: true, + S3ProfileID: "missing-profile", + }) + require.Error(t, err) +} + func openTestStore(t *testing.T) *Store { t.Helper() diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index d1c058ec6..6d6e9cda7 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -199,6 +199,51 @@ gateway: # OpenAI 透传模式是否放行客户端超时头(如 x-stainless-timeout) # 默认 false:过滤超时头,降低上游提前断流风险。 openai_passthrough_allow_timeout_headers: false + # OpenAI Responses WebSocket 配置(默认关闭,不影响现网 HTTP 线路) + openai_ws: + # 全局总开关,默认 false;关闭时所有请求保持原有 HTTP/SSE 路由 + enabled: false + # 按账号类型细分开关 + oauth_enabled: true + apikey_enabled: true + # 全局强制 HTTP(紧急回滚开关) + force_http: false + # 允许在 WSv2 下按策略恢复 store=true(默认 false) + allow_store_recovery: false + # 是否启用 WSv2 generate=false 预热(默认 false) + prewarm_generate_enabled: false + # 协议 feature 开关,v2 优先于 v1 + responses_websockets: false + responses_websockets_v2: true + # 连接池参数(按账号池化复用) + max_conns_per_account: 8 + min_idle_per_account: 1 + max_idle_per_account: 4 + # 是否按账号并发动态计算连接池上限: + # effective_max_conns = min(max_conns_per_account, ceil(account.concurrency * factor)) + dynamic_max_conns_by_account_concurrency_enabled: true + # 按账号类型分别设置系数(OAuth / API Key) + oauth_max_conns_factor: 1.0 + apikey_max_conns_factor: 1.0 + dial_timeout_seconds: 10 + read_timeout_seconds: 900 + write_timeout_seconds: 120 + pool_target_utilization: 0.7 + queue_limit_per_conn: 256 + # WS 回退到 HTTP 后的冷却时间(秒),用于避免 WS/HTTP 来回抖动;0 表示关闭冷却 + 
fallback_cooldown_seconds: 30 + # 调度与粘连参数 + lb_top_k: 3 + sticky_session_ttl_seconds: 3600 + sticky_response_id_ttl_seconds: 3600 + # 兼容旧键:当 sticky_response_id_ttl_seconds 缺失时回退该值 + sticky_previous_response_ttl_seconds: 3600 + scheduler_score_weights: + priority: 1.0 + load: 1.0 + queue: 0.7 + error_rate: 0.8 + ttft: 0.5 # HTTP upstream connection pool settings (HTTP/2 + multi-proxy scenario defaults) # HTTP 上游连接池配置(HTTP/2 + 多代理场景默认值) # Max idle connections across all hosts diff --git a/frontend/src/components/account/CreateAccountModal.vue b/frontend/src/components/account/CreateAccountModal.vue index 30da07675..39ad5ee85 100644 --- a/frontend/src/components/account/CreateAccountModal.vue +++ b/frontend/src/components/account/CreateAccountModal.vue @@ -1697,6 +1697,42 @@ + +
+
+
+ +

+ {{ t('admin.accounts.openai.wsModeDesc') }} +

+
+ +
+

+ {{ t('admin.accounts.openai.responsesWebsocketsV2PassthroughHint') }} +

+
+
(null) const interceptWarmupRequests = ref(false) const autoPauseOnExpired = ref(true) const openaiPassthroughEnabled = ref(false) +const openaiOAuthResponsesWebSocketV2Enabled = ref(false) +const openaiAPIKeyResponsesWebSocketV2Enabled = ref(false) const codexCLIOnlyEnabled = ref(false) const anthropicPassthroughEnabled = ref(false) const mixedScheduling = ref(false) // For antigravity accounts: enable mixed scheduling @@ -2374,6 +2412,22 @@ const geminiSelectedTier = computed(() => { } }) +const openaiResponsesWebSocketV2Enabled = computed({ + get: () => { + if (form.platform === 'openai' && accountCategory.value === 'apikey') { + return openaiAPIKeyResponsesWebSocketV2Enabled.value + } + return openaiOAuthResponsesWebSocketV2Enabled.value + }, + set: (enabled: boolean) => { + if (form.platform === 'openai' && accountCategory.value === 'apikey') { + openaiAPIKeyResponsesWebSocketV2Enabled.value = enabled + return + } + openaiOAuthResponsesWebSocketV2Enabled.value = enabled + } +}) + const isOpenAIModelRestrictionDisabled = computed(() => form.platform === 'openai' && openaiPassthroughEnabled.value ) @@ -2555,6 +2609,8 @@ watch( } if (newPlatform !== 'openai') { openaiPassthroughEnabled.value = false + openaiOAuthResponsesWebSocketV2Enabled.value = false + openaiAPIKeyResponsesWebSocketV2Enabled.value = false codexCLIOnlyEnabled.value = false } if (newPlatform !== 'anthropic') { @@ -2827,6 +2883,8 @@ const resetForm = () => { interceptWarmupRequests.value = false autoPauseOnExpired.value = true openaiPassthroughEnabled.value = false + openaiOAuthResponsesWebSocketV2Enabled.value = false + openaiAPIKeyResponsesWebSocketV2Enabled.value = false codexCLIOnlyEnabled.value = false anthropicPassthroughEnabled.value = false // Reset quota control state @@ -2867,6 +2925,11 @@ const buildOpenAIExtra = (base?: Record): Record = { ...(base || {}) } + extra.openai_oauth_responses_websockets_v2_enabled = openaiOAuthResponsesWebSocketV2Enabled.value + 
extra.openai_apikey_responses_websockets_v2_enabled = openaiAPIKeyResponsesWebSocketV2Enabled.value + // 清理兼容旧键,统一改用分类型开关。 + delete extra.responses_websockets_v2_enabled + delete extra.openai_ws_enabled if (openaiPassthroughEnabled.value) { extra.openai_passthrough = true } else { @@ -2912,6 +2975,10 @@ const buildSoraExtra = ( delete extra.openai_passthrough delete extra.openai_oauth_passthrough delete extra.codex_cli_only + delete extra.openai_oauth_responses_websockets_v2_enabled + delete extra.openai_apikey_responses_websockets_v2_enabled + delete extra.responses_websockets_v2_enabled + delete extra.openai_ws_enabled return Object.keys(extra).length > 0 ? extra : undefined } diff --git a/frontend/src/components/account/EditAccountModal.vue b/frontend/src/components/account/EditAccountModal.vue index c6643717f..04ce80b00 100644 --- a/frontend/src/components/account/EditAccountModal.vue +++ b/frontend/src/components/account/EditAccountModal.vue @@ -735,6 +735,42 @@
+ +
+
+
+ +

+ {{ t('admin.accounts.openai.wsModeDesc') }} +

+
+ +
+

+ {{ t('admin.accounts.openai.responsesWebsocketsV2PassthroughHint') }} +

+
+
('5m') // OpenAI 自动透传开关(OAuth/API Key) const openaiPassthroughEnabled = ref(false) +const openaiOAuthResponsesWebSocketV2Enabled = ref(false) +const openaiAPIKeyResponsesWebSocketV2Enabled = ref(false) const codexCLIOnlyEnabled = ref(false) const anthropicPassthroughEnabled = ref(false) +const openaiResponsesWebSocketV2Enabled = computed({ + get: () => { + if (props.account?.type === 'apikey') { + return openaiAPIKeyResponsesWebSocketV2Enabled.value + } + return openaiOAuthResponsesWebSocketV2Enabled.value + }, + set: (enabled: boolean) => { + if (props.account?.type === 'apikey') { + openaiAPIKeyResponsesWebSocketV2Enabled.value = enabled + return + } + openaiOAuthResponsesWebSocketV2Enabled.value = enabled + } +}) const isOpenAIModelRestrictionDisabled = computed(() => props.account?.platform === 'openai' && openaiPassthroughEnabled.value ) @@ -1347,10 +1400,32 @@ watch( // Load OpenAI passthrough toggle (OpenAI OAuth/API Key) openaiPassthroughEnabled.value = false + openaiOAuthResponsesWebSocketV2Enabled.value = false + openaiAPIKeyResponsesWebSocketV2Enabled.value = false codexCLIOnlyEnabled.value = false anthropicPassthroughEnabled.value = false if (newAccount.platform === 'openai' && (newAccount.type === 'oauth' || newAccount.type === 'apikey')) { openaiPassthroughEnabled.value = extra?.openai_passthrough === true || extra?.openai_oauth_passthrough === true + if (typeof extra?.openai_oauth_responses_websockets_v2_enabled === 'boolean') { + openaiOAuthResponsesWebSocketV2Enabled.value = extra.openai_oauth_responses_websockets_v2_enabled === true + } else if (newAccount.type === 'oauth') { + if (typeof extra?.responses_websockets_v2_enabled === 'boolean') { + openaiOAuthResponsesWebSocketV2Enabled.value = extra.responses_websockets_v2_enabled === true + } else { + // 兼容旧键:openai_ws_enabled + openaiOAuthResponsesWebSocketV2Enabled.value = extra?.openai_ws_enabled === true + } + } + if (typeof extra?.openai_apikey_responses_websockets_v2_enabled === 'boolean') { 
+ openaiAPIKeyResponsesWebSocketV2Enabled.value = extra.openai_apikey_responses_websockets_v2_enabled === true + } else if (newAccount.type === 'apikey') { + if (typeof extra?.responses_websockets_v2_enabled === 'boolean') { + openaiAPIKeyResponsesWebSocketV2Enabled.value = extra.responses_websockets_v2_enabled === true + } else { + // 兼容旧键:openai_ws_enabled + openaiAPIKeyResponsesWebSocketV2Enabled.value = extra?.openai_ws_enabled === true + } + } if (newAccount.type === 'oauth') { codexCLIOnlyEnabled.value = extra?.codex_cli_only === true } @@ -1934,6 +2009,10 @@ const handleSubmit = async () => { const currentExtra = (props.account.extra as Record) || {} const newExtra: Record = { ...currentExtra } const hadCodexCLIOnlyEnabled = currentExtra.codex_cli_only === true + newExtra.openai_oauth_responses_websockets_v2_enabled = openaiOAuthResponsesWebSocketV2Enabled.value + newExtra.openai_apikey_responses_websockets_v2_enabled = openaiAPIKeyResponsesWebSocketV2Enabled.value + delete newExtra.responses_websockets_v2_enabled + delete newExtra.openai_ws_enabled if (openaiPassthroughEnabled.value) { newExtra.openai_passthrough = true } else { diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 838a4a2e0..176deab25 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -1716,6 +1716,20 @@ export default { oauthPassthrough: 'Auto passthrough (auth only)', oauthPassthroughDesc: 'When enabled, this OpenAI account uses automatic passthrough: the gateway forwards request/response as-is and only swaps auth, while keeping billing/concurrency/audit and necessary safety filtering.', + responsesWebsocketsV2: 'Responses WebSocket v2', + responsesWebsocketsV2Desc: + 'Disabled by default. Enable to allow responses_websockets_v2 capability (still gated by global and account-type switches).', + wsMode: 'WS mode', + wsModeDesc: + 'Only applies to the current OpenAI account type. 
This account can use OpenAI WebSocket Mode only when enabled.', + oauthResponsesWebsocketsV2: 'OAuth WebSocket Mode', + oauthResponsesWebsocketsV2Desc: + 'Only applies to OpenAI OAuth. This account can use OpenAI WebSocket Mode only when enabled.', + apiKeyResponsesWebsocketsV2: 'API Key WebSocket Mode', + apiKeyResponsesWebsocketsV2Desc: + 'Only applies to OpenAI API Key. This account can use OpenAI WebSocket Mode only when enabled.', + responsesWebsocketsV2PassthroughHint: + 'Automatic passthrough is currently enabled: passthrough takes priority, so the WSv2 switch is not effective.', codexCLIOnly: 'Codex official clients only', codexCLIOnlyDesc: 'Only applies to OpenAI OAuth. When enabled, only Codex official client families are allowed; when disabled, the gateway bypasses this restriction and keeps existing behavior.', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index a3f30cea6..adb450702 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -1865,6 +1865,18 @@ export default { oauthPassthrough: '自动透传(仅替换认证)', oauthPassthroughDesc: '开启后,该 OpenAI 账号将自动透传请求与响应,仅替换认证并保留计费/并发/审计及必要安全过滤;如遇兼容性问题可随时关闭回滚。', + responsesWebsocketsV2: 'Responses WebSocket v2', + responsesWebsocketsV2Desc: + '默认关闭。开启后可启用 responses_websockets_v2 协议能力(受网关全局开关与账号类型开关约束)。', + wsMode: 'WS mode', + wsModeDesc: '仅对当前 OpenAI 账号类型生效。开启后该账号才允许使用 OpenAI WebSocket Mode 协议。', + oauthResponsesWebsocketsV2: 'OAuth WebSocket Mode', + oauthResponsesWebsocketsV2Desc: + '仅对 OpenAI OAuth 生效。开启后该账号才允许使用 OpenAI WebSocket Mode 协议。', + apiKeyResponsesWebsocketsV2: 'API Key WebSocket Mode', + apiKeyResponsesWebsocketsV2Desc: + '仅对 OpenAI API Key 生效。开启后该账号才允许使用 OpenAI WebSocket Mode 协议。', + responsesWebsocketsV2PassthroughHint: '当前已开启自动透传:透传优先,WSv2 开关暂不生效。', codexCLIOnly: '仅允许 Codex 官方客户端', codexCLIOnlyDesc: '仅对 OpenAI OAuth 生效。开启后仅允许 Codex 官方客户端家族访问;关闭后完全绕过并保持原逻辑。', modelRestrictionDisabledByPassthrough: '已开启自动透传:模型白名单/映射不会生效。', diff --git 
a/tools/perf/openai_responses_ws_v2_compare_k6.js b/tools/perf/openai_responses_ws_v2_compare_k6.js new file mode 100644 index 000000000..6bb4b9a21 --- /dev/null +++ b/tools/perf/openai_responses_ws_v2_compare_k6.js @@ -0,0 +1,167 @@ +import http from 'k6/http'; +import { check, sleep } from 'k6'; +import { Rate, Trend } from 'k6/metrics'; + +const baseURL = (__ENV.BASE_URL || 'http://127.0.0.1:5231').replace(/\/$/, ''); +const httpAPIKey = (__ENV.HTTP_API_KEY || '').trim(); +const wsAPIKey = (__ENV.WS_API_KEY || '').trim(); +const model = __ENV.MODEL || 'gpt-5.1'; +const duration = __ENV.DURATION || '5m'; +const timeout = __ENV.TIMEOUT || '180s'; + +const httpRPS = Number(__ENV.HTTP_RPS || 10); +const wsRPS = Number(__ENV.WS_RPS || 10); +const chainRPS = Number(__ENV.CHAIN_RPS || 1); +const chainRounds = Number(__ENV.CHAIN_ROUNDS || 20); +const preAllocatedVUs = Number(__ENV.PRE_ALLOCATED_VUS || 40); +const maxVUs = Number(__ENV.MAX_VUS || 300); + +const httpDurationMs = new Trend('openai_http_req_duration_ms', true); +const wsDurationMs = new Trend('openai_ws_req_duration_ms', true); +const wsChainDurationMs = new Trend('openai_ws_chain_round_duration_ms', true); +const wsChainTTFTMs = new Trend('openai_ws_chain_round_ttft_ms', true); +const httpNon2xxRate = new Rate('openai_http_non2xx_rate'); +const wsNon2xxRate = new Rate('openai_ws_non2xx_rate'); +const wsChainRoundSuccessRate = new Rate('openai_ws_chain_round_success_rate'); + +export const options = { + scenarios: { + http_baseline: { + executor: 'constant-arrival-rate', + exec: 'runHTTPBaseline', + rate: httpRPS, + timeUnit: '1s', + duration, + preAllocatedVUs, + maxVUs, + tags: { path: 'http_baseline' }, + }, + ws_baseline: { + executor: 'constant-arrival-rate', + exec: 'runWSBaseline', + rate: wsRPS, + timeUnit: '1s', + duration, + preAllocatedVUs, + maxVUs, + tags: { path: 'ws_baseline' }, + }, + ws_chain_20_rounds: { + executor: 'constant-arrival-rate', + exec: 'runWSChain20Rounds', + rate: chainRPS, + 
timeUnit: '1s', + duration, + preAllocatedVUs: Math.max(2, Math.ceil(chainRPS * 2)), + maxVUs: Math.max(20, Math.ceil(chainRPS * 10)), + tags: { path: 'ws_chain_20_rounds' }, + }, + }, + thresholds: { + openai_http_non2xx_rate: ['rate<0.02'], + openai_ws_non2xx_rate: ['rate<0.02'], + openai_http_req_duration_ms: ['p(95)<4000', 'p(99)<7000'], + openai_ws_req_duration_ms: ['p(95)<3000', 'p(99)<6000'], + openai_ws_chain_round_success_rate: ['rate>0.98'], + openai_ws_chain_round_ttft_ms: ['p(99)<1200'], + }, +}; + +function buildHeaders(apiKey) { + const headers = { + 'Content-Type': 'application/json', + 'User-Agent': 'codex_cli_rs/0.98.0', + }; + if (apiKey) { + headers.Authorization = `Bearer ${apiKey}`; + } + return headers; +} + +function buildBody(previousResponseID) { + const body = { + model, + stream: false, + input: [ + { + role: 'user', + content: [{ type: 'input_text', text: '请回复一个单词: pong' }], + }, + ], + max_output_tokens: 64, + }; + if (previousResponseID) { + body.previous_response_id = previousResponseID; + } + return JSON.stringify(body); +} + +function postResponses(apiKey, body, tags) { + const res = http.post(`${baseURL}/v1/responses`, body, { + headers: buildHeaders(apiKey), + timeout, + tags, + }); + check(res, { + 'status is 2xx': (r) => r.status >= 200 && r.status < 300, + }); + return res; +} + +function parseResponseID(res) { + if (!res || !res.body) { + return ''; + } + try { + const payload = JSON.parse(res.body); + if (payload && typeof payload.id === 'string') { + return payload.id.trim(); + } + } catch (_) { + return ''; + } + return ''; +} + +export function runHTTPBaseline() { + const res = postResponses(httpAPIKey, buildBody(''), { transport: 'http' }); + httpDurationMs.add(res.timings.duration, { transport: 'http' }); + httpNon2xxRate.add(res.status < 200 || res.status >= 300, { transport: 'http' }); +} + +export function runWSBaseline() { + const res = postResponses(wsAPIKey, buildBody(''), { transport: 'ws_v2' }); + 
wsDurationMs.add(res.timings.duration, { transport: 'ws_v2' }); + wsNon2xxRate.add(res.status < 200 || res.status >= 300, { transport: 'ws_v2' }); +} + +// 20+ 轮续链专项,验证 previous_response_id 在长链下的稳定性与时延。 +export function runWSChain20Rounds() { + let previousResponseID = ''; + for (let round = 1; round <= chainRounds; round += 1) { + const roundStart = Date.now(); + const res = postResponses(wsAPIKey, buildBody(previousResponseID), { transport: 'ws_v2_chain' }); + const ok = res.status >= 200 && res.status < 300; + wsChainRoundSuccessRate.add(ok, { round: `${round}` }); + wsChainDurationMs.add(Date.now() - roundStart, { round: `${round}` }); + wsChainTTFTMs.add(res.timings.waiting, { round: `${round}` }); + wsNon2xxRate.add(!ok, { transport: 'ws_v2_chain' }); + if (!ok) { + return; + } + const respID = parseResponseID(res); + if (!respID) { + wsChainRoundSuccessRate.add(false, { round: `${round}`, reason: 'missing_response_id' }); + return; + } + previousResponseID = respID; + sleep(0.01); + } +} + +export function handleSummary(data) { + return { + stdout: `\nOpenAI WSv2 对比压测完成\n${JSON.stringify(data.metrics, null, 2)}\n`, + 'docs/perf/openai-ws-v2-compare-summary.json': JSON.stringify(data, null, 2), + }; +} diff --git a/tools/perf/openai_ws_pooling_compare_k6.js b/tools/perf/openai_ws_pooling_compare_k6.js new file mode 100644 index 000000000..d82104791 --- /dev/null +++ b/tools/perf/openai_ws_pooling_compare_k6.js @@ -0,0 +1,123 @@ +import http from 'k6/http'; +import { check } from 'k6'; +import { Rate, Trend } from 'k6/metrics'; + +const pooledBaseURL = (__ENV.POOLED_BASE_URL || 'http://127.0.0.1:5231').replace(/\/$/, ''); +const oneToOneBaseURL = (__ENV.ONE_TO_ONE_BASE_URL || '').replace(/\/$/, ''); +const wsAPIKey = (__ENV.WS_API_KEY || '').trim(); +const model = __ENV.MODEL || 'gpt-5.1'; +const timeout = __ENV.TIMEOUT || '180s'; +const duration = __ENV.DURATION || '5m'; +const pooledRPS = Number(__ENV.POOLED_RPS || 12); +const oneToOneRPS = 
Number(__ENV.ONE_TO_ONE_RPS || 12); +const preAllocatedVUs = Number(__ENV.PRE_ALLOCATED_VUS || 50); +const maxVUs = Number(__ENV.MAX_VUS || 400); + +const pooledDurationMs = new Trend('openai_ws_pooled_duration_ms', true); +const oneToOneDurationMs = new Trend('openai_ws_one_to_one_duration_ms', true); +const pooledTTFTMs = new Trend('openai_ws_pooled_ttft_ms', true); +const oneToOneTTFTMs = new Trend('openai_ws_one_to_one_ttft_ms', true); +const pooledNon2xxRate = new Rate('openai_ws_pooled_non2xx_rate'); +const oneToOneNon2xxRate = new Rate('openai_ws_one_to_one_non2xx_rate'); + +export const options = { + scenarios: { + pooled_mode: { + executor: 'constant-arrival-rate', + exec: 'runPooledMode', + rate: pooledRPS, + timeUnit: '1s', + duration, + preAllocatedVUs, + maxVUs, + tags: { mode: 'pooled' }, + }, + one_to_one_mode: { + executor: 'constant-arrival-rate', + exec: 'runOneToOneMode', + rate: oneToOneRPS, + timeUnit: '1s', + duration, + preAllocatedVUs, + maxVUs, + tags: { mode: 'one_to_one' }, + startTime: '5s', + }, + }, + thresholds: { + openai_ws_pooled_non2xx_rate: ['rate<0.02'], + openai_ws_one_to_one_non2xx_rate: ['rate<0.02'], + openai_ws_pooled_duration_ms: ['p(95)<3000', 'p(99)<6000'], + openai_ws_one_to_one_duration_ms: ['p(95)<6000', 'p(99)<10000'], + }, +}; + +function buildHeaders() { + const headers = { + 'Content-Type': 'application/json', + 'User-Agent': 'codex_cli_rs/0.98.0', + }; + if (wsAPIKey) { + headers.Authorization = `Bearer ${wsAPIKey}`; + } + return headers; +} + +function buildBody() { + return JSON.stringify({ + model, + stream: false, + input: [ + { + role: 'user', + content: [{ type: 'input_text', text: '请回复: pong' }], + }, + ], + max_output_tokens: 48, + }); +} + +function send(baseURL, mode) { + if (!baseURL) { + return null; + } + const res = http.post(`${baseURL}/v1/responses`, buildBody(), { + headers: buildHeaders(), + timeout, + tags: { mode }, + }); + check(res, { + 'status is 2xx': (r) => r.status >= 200 && r.status < 
300, + }); + return res; +} + +export function runPooledMode() { + const res = send(pooledBaseURL, 'pooled'); + if (!res) { + return; + } + pooledDurationMs.add(res.timings.duration, { mode: 'pooled' }); + pooledTTFTMs.add(res.timings.waiting, { mode: 'pooled' }); + pooledNon2xxRate.add(res.status < 200 || res.status >= 300, { mode: 'pooled' }); +} + +export function runOneToOneMode() { + if (!oneToOneBaseURL) { + return; + } + const res = send(oneToOneBaseURL, 'one_to_one'); + if (!res) { + return; + } + oneToOneDurationMs.add(res.timings.duration, { mode: 'one_to_one' }); + oneToOneTTFTMs.add(res.timings.waiting, { mode: 'one_to_one' }); + oneToOneNon2xxRate.add(res.status < 200 || res.status >= 300, { mode: 'one_to_one' }); +} + +export function handleSummary(data) { + return { + stdout: `\nOpenAI WS 池化 vs 1:1 对比压测完成\n${JSON.stringify(data.metrics, null, 2)}\n`, + 'docs/perf/openai-ws-pooling-compare-summary.json': JSON.stringify(data, null, 2), + }; +} From 153296804f7f583522a98d1ba47113ff8c5dbf38 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Wed, 25 Feb 2026 15:24:57 +0800 Subject: [PATCH 005/120] =?UTF-8?q?feat:=20=E5=BC=80=E5=A7=8B=E5=88=A0?= =?UTF-8?q?=E9=99=A4=E6=95=B0=E6=8D=AE=E7=AE=A1=E7=90=86=E7=9A=84=E5=8A=9F?= =?UTF-8?q?=E8=83=BD,=E8=BF=99=E4=B8=AA=E5=8A=9F=E8=83=BD=E5=85=A8?= =?UTF-8?q?=E9=83=A8=E7=A7=BB=E5=8A=A8=E5=88=B0=E5=85=B6=E4=BB=96=E7=9A=84?= =?UTF-8?q?=E5=8D=95=E7=8B=AC=E9=A1=B9=E7=9B=AE=E4=B8=AD=E5=8E=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- Makefile | 12 +- README_CN.md | 8 +- .../handler/admin/data_management_handler.go | 10 +- .../admin/data_management_handler_test.go | 4 +- .../internal/service/data_management_grpc.go | 103 +- .../service/data_management_grpc_test.go | 30 +- .../service/data_management_service.go | 66 +- .../service/data_management_service_test.go | 8 +- backup/README.md | 49 - backup/cmd/backupd/main.go | 114 - backup/ent/backupjob.go | 352 -- 
backup/ent/backupjob/backupjob.go | 299 - backup/ent/backupjob/where.go | 1344 ---- backup/ent/backupjob_create.go | 604 -- backup/ent/backupjob_delete.go | 88 - backup/ent/backupjob_query.go | 606 -- backup/ent/backupjob_update.go | 1277 ---- backup/ent/backupjobevent.go | 201 - backup/ent/backupjobevent/backupjobevent.go | 158 - backup/ent/backupjobevent/where.go | 449 -- backup/ent/backupjobevent_create.go | 354 -- backup/ent/backupjobevent_delete.go | 88 - backup/ent/backupjobevent_query.go | 606 -- backup/ent/backupjobevent_update.go | 517 -- backup/ent/backups3config.go | 250 - backup/ent/backups3config/backups3config.go | 184 - backup/ent/backups3config/where.go | 790 --- backup/ent/backups3config_create.go | 520 -- backup/ent/backups3config_delete.go | 88 - backup/ent/backups3config_query.go | 527 -- backup/ent/backups3config_update.go | 638 -- backup/ent/backupsetting.go | 172 - backup/ent/backupsetting/backupsetting.go | 141 - backup/ent/backupsetting/where.go | 410 -- backup/ent/backupsetting_create.go | 357 -- backup/ent/backupsetting_delete.go | 88 - backup/ent/backupsetting_query.go | 527 -- backup/ent/backupsetting_update.go | 448 -- backup/ent/backupsourceconfig.go | 267 - .../backupsourceconfig/backupsourceconfig.go | 198 - backup/ent/backupsourceconfig/where.go | 995 --- backup/ent/backupsourceconfig_create.go | 465 -- backup/ent/backupsourceconfig_delete.go | 88 - backup/ent/backupsourceconfig_query.go | 527 -- backup/ent/backupsourceconfig_update.go | 864 --- backup/ent/client.go | 947 --- backup/ent/ent.go | 616 -- backup/ent/enttest/enttest.go | 84 - backup/ent/generate.go | 3 - backup/ent/hook/hook.go | 247 - backup/ent/migrate/migrate.go | 64 - backup/ent/migrate/schema.go | 207 - backup/ent/mutation.go | 5587 ----------------- backup/ent/predicate/predicate.go | 22 - backup/ent/runtime.go | 158 - backup/ent/runtime/runtime.go | 10 - backup/ent/schema/backup_job.go | 56 - backup/ent/schema/backup_job_event.go | 38 - 
backup/ent/schema/backup_s3_config.go | 39 - backup/ent/schema/backup_setting.go | 24 - backup/ent/schema/backup_source_config.go | 40 - backup/ent/tx.go | 222 - backup/go.mod | 62 - backup/go.sum | 186 - backup/internal/artifact/doc.go | 1 - backup/internal/config/doc.go | 1 - backup/internal/executor/doc.go | 1 - backup/internal/executor/runner.go | 788 --- backup/internal/executor/runner_test.go | 110 - backup/internal/grpcserver/doc.go | 1 - backup/internal/grpcserver/interceptor.go | 131 - .../internal/grpcserver/interceptor_test.go | 50 - backup/internal/grpcserver/server.go | 697 -- backup/internal/s3client/client.go | 142 - backup/internal/s3client/doc.go | 1 - backup/internal/store/entstore/doc.go | 1 - backup/internal/store/entstore/store.go | 1881 ------ backup/internal/store/entstore/store_test.go | 378 -- backup/proto/backup/v1/backup.pb.go | 2877 --------- backup/proto/backup/v1/backup.proto | 267 - backup/proto/backup/v1/backup_grpc.pb.go | 729 --- deploy/BACKUPD_CN.md | 78 - deploy/DATAMANAGEMENTD_CN.md | 78 + deploy/README.md | 16 +- deploy/docker-compose.override.yml.example | 8 +- ...-backupd.sh => install-datamanagementd.sh} | 38 +- ...ervice => sub2api-datamanagementd.service} | 8 +- frontend/src/i18n/locales/en.ts | 12 +- frontend/src/i18n/locales/zh.ts | 12 +- .../src/views/admin/DataManagementView.vue | 9 +- 90 files changed, 267 insertions(+), 32551 deletions(-) delete mode 100644 backup/README.md delete mode 100644 backup/cmd/backupd/main.go delete mode 100644 backup/ent/backupjob.go delete mode 100644 backup/ent/backupjob/backupjob.go delete mode 100644 backup/ent/backupjob/where.go delete mode 100644 backup/ent/backupjob_create.go delete mode 100644 backup/ent/backupjob_delete.go delete mode 100644 backup/ent/backupjob_query.go delete mode 100644 backup/ent/backupjob_update.go delete mode 100644 backup/ent/backupjobevent.go delete mode 100644 backup/ent/backupjobevent/backupjobevent.go delete mode 100644 
backup/ent/backupjobevent/where.go delete mode 100644 backup/ent/backupjobevent_create.go delete mode 100644 backup/ent/backupjobevent_delete.go delete mode 100644 backup/ent/backupjobevent_query.go delete mode 100644 backup/ent/backupjobevent_update.go delete mode 100644 backup/ent/backups3config.go delete mode 100644 backup/ent/backups3config/backups3config.go delete mode 100644 backup/ent/backups3config/where.go delete mode 100644 backup/ent/backups3config_create.go delete mode 100644 backup/ent/backups3config_delete.go delete mode 100644 backup/ent/backups3config_query.go delete mode 100644 backup/ent/backups3config_update.go delete mode 100644 backup/ent/backupsetting.go delete mode 100644 backup/ent/backupsetting/backupsetting.go delete mode 100644 backup/ent/backupsetting/where.go delete mode 100644 backup/ent/backupsetting_create.go delete mode 100644 backup/ent/backupsetting_delete.go delete mode 100644 backup/ent/backupsetting_query.go delete mode 100644 backup/ent/backupsetting_update.go delete mode 100644 backup/ent/backupsourceconfig.go delete mode 100644 backup/ent/backupsourceconfig/backupsourceconfig.go delete mode 100644 backup/ent/backupsourceconfig/where.go delete mode 100644 backup/ent/backupsourceconfig_create.go delete mode 100644 backup/ent/backupsourceconfig_delete.go delete mode 100644 backup/ent/backupsourceconfig_query.go delete mode 100644 backup/ent/backupsourceconfig_update.go delete mode 100644 backup/ent/client.go delete mode 100644 backup/ent/ent.go delete mode 100644 backup/ent/enttest/enttest.go delete mode 100644 backup/ent/generate.go delete mode 100644 backup/ent/hook/hook.go delete mode 100644 backup/ent/migrate/migrate.go delete mode 100644 backup/ent/migrate/schema.go delete mode 100644 backup/ent/mutation.go delete mode 100644 backup/ent/predicate/predicate.go delete mode 100644 backup/ent/runtime.go delete mode 100644 backup/ent/runtime/runtime.go delete mode 100644 backup/ent/schema/backup_job.go delete mode 100644 
backup/ent/schema/backup_job_event.go delete mode 100644 backup/ent/schema/backup_s3_config.go delete mode 100644 backup/ent/schema/backup_setting.go delete mode 100644 backup/ent/schema/backup_source_config.go delete mode 100644 backup/ent/tx.go delete mode 100644 backup/go.mod delete mode 100644 backup/go.sum delete mode 100644 backup/internal/artifact/doc.go delete mode 100644 backup/internal/config/doc.go delete mode 100644 backup/internal/executor/doc.go delete mode 100644 backup/internal/executor/runner.go delete mode 100644 backup/internal/executor/runner_test.go delete mode 100644 backup/internal/grpcserver/doc.go delete mode 100644 backup/internal/grpcserver/interceptor.go delete mode 100644 backup/internal/grpcserver/interceptor_test.go delete mode 100644 backup/internal/grpcserver/server.go delete mode 100644 backup/internal/s3client/client.go delete mode 100644 backup/internal/s3client/doc.go delete mode 100644 backup/internal/store/entstore/doc.go delete mode 100644 backup/internal/store/entstore/store.go delete mode 100644 backup/internal/store/entstore/store_test.go delete mode 100644 backup/proto/backup/v1/backup.pb.go delete mode 100644 backup/proto/backup/v1/backup.proto delete mode 100644 backup/proto/backup/v1/backup_grpc.pb.go delete mode 100644 deploy/BACKUPD_CN.md create mode 100644 deploy/DATAMANAGEMENTD_CN.md rename deploy/{install-backupd.sh => install-datamanagementd.sh} (62%) rename deploy/{sub2api-backupd.service => sub2api-datamanagementd.service} (57%) diff --git a/Makefile b/Makefile index 99d520168..fd6a5a9a5 100644 --- a/Makefile +++ b/Makefile @@ -1,4 +1,4 @@ -.PHONY: build build-backend build-frontend build-backupd test test-backend test-frontend test-backupd secret-scan +.PHONY: build build-backend build-frontend build-datamanagementd test test-backend test-frontend test-datamanagementd secret-scan # 一键编译前后端 build: build-backend build-frontend @@ -11,9 +11,9 @@ build-backend: build-frontend: @pnpm --dir frontend run build -# 编译 
backupd(宿主机备份进程) -build-backupd: - @cd backup && go build -o backupd ./cmd/backupd +# 编译 datamanagementd(宿主机数据管理进程) +build-datamanagementd: + @cd datamanagement && go build -o datamanagementd ./cmd/datamanagementd # 运行测试(后端 + 前端) test: test-backend test-frontend @@ -25,8 +25,8 @@ test-frontend: @pnpm --dir frontend run lint:check @pnpm --dir frontend run typecheck -test-backupd: - @cd backup && go test ./... +test-datamanagementd: + @cd datamanagement && go test ./... secret-scan: @python3 tools/secret_scan.py diff --git a/README_CN.md b/README_CN.md index 40ff3d687..41bf69916 100644 --- a/README_CN.md +++ b/README_CN.md @@ -246,17 +246,17 @@ docker-compose -f docker-compose.local.yml logs -f sub2api **推荐:** 使用 `docker-compose.local.yml`(脚本部署)以便更轻松地管理数据。 -#### 启用“数据管理”功能(backupd) +#### 启用“数据管理”功能(datamanagementd) -如需启用管理后台“数据管理”,需要额外部署宿主机备份进程 `backupd`。 +如需启用管理后台“数据管理”,需要额外部署宿主机数据管理进程 `datamanagementd`。 关键点: -- 主进程固定探测:`/tmp/sub2api-backup.sock` +- 主进程固定探测:`/tmp/sub2api-datamanagement.sock` - 只有该 Socket 可连通时,数据管理功能才会开启 - Docker 场景需将宿主机 Socket 挂载到容器同路径 -详细部署步骤见:`deploy/BACKUPD_CN.md` +详细部署步骤见:`deploy/DATAMANAGEMENTD_CN.md` #### 访问 diff --git a/backend/internal/handler/admin/data_management_handler.go b/backend/internal/handler/admin/data_management_handler.go index 4245fcd91..69a0b5b51 100644 --- a/backend/internal/handler/admin/data_management_handler.go +++ b/backend/internal/handler/admin/data_management_handler.go @@ -488,9 +488,9 @@ func (h *DataManagementHandler) GetBackupJob(c *gin.Context) { func (h *DataManagementHandler) requireAgentEnabled(c *gin.Context) bool { if h.dataManagementService == nil { err := infraerrors.ServiceUnavailable( - service.BackupAgentUnavailableReason, - "backup agent service is not configured", - ).WithMetadata(map[string]string{"socket_path": service.DefaultBackupAgentSocketPath}) + service.DataManagementAgentUnavailableReason, + "data management agent service is not configured", + ).WithMetadata(map[string]string{"socket_path": 
service.DefaultDataManagementAgentSocketPath}) response.ErrorFrom(c, err) return false } @@ -507,8 +507,8 @@ func (h *DataManagementHandler) getAgentHealth(c *gin.Context) service.DataManag if h.dataManagementService == nil { return service.DataManagementAgentHealth{ Enabled: false, - Reason: service.BackupAgentUnavailableReason, - SocketPath: service.DefaultBackupAgentSocketPath, + Reason: service.DataManagementAgentUnavailableReason, + SocketPath: service.DefaultDataManagementAgentSocketPath, } } return h.dataManagementService.GetAgentHealth(c.Request.Context()) diff --git a/backend/internal/handler/admin/data_management_handler_test.go b/backend/internal/handler/admin/data_management_handler_test.go index 235b4bd0c..6af64850d 100644 --- a/backend/internal/handler/admin/data_management_handler_test.go +++ b/backend/internal/handler/admin/data_management_handler_test.go @@ -46,7 +46,7 @@ func TestDataManagementHandler_AgentHealthAlways200(t *testing.T) { } require.NoError(t, json.Unmarshal(envelope.Data, &data)) require.False(t, data.Enabled) - require.Equal(t, service.BackupAgentSocketMissingReason, data.Reason) + require.Equal(t, service.DataManagementAgentSocketMissingReason, data.Reason) require.Equal(t, svc.SocketPath(), data.SocketPath) } @@ -68,7 +68,7 @@ func TestDataManagementHandler_NonHealthRouteReturns503WhenDisabled(t *testing.T var envelope apiEnvelope require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &envelope)) require.Equal(t, http.StatusServiceUnavailable, envelope.Code) - require.Equal(t, service.BackupAgentSocketMissingReason, envelope.Reason) + require.Equal(t, service.DataManagementAgentSocketMissingReason, envelope.Reason) } func TestNormalizeBackupIdempotencyKey(t *testing.T) { diff --git a/backend/internal/service/data_management_grpc.go b/backend/internal/service/data_management_grpc.go index 7ad4b5264..dfdd5e126 100644 --- a/backend/internal/service/data_management_grpc.go +++ b/backend/internal/service/data_management_grpc.go @@ 
-20,13 +20,13 @@ import ( ) const ( - backupInvalidArgumentReason = "BACKUP_INVALID_ARGUMENT" - backupResourceNotFoundReason = "BACKUP_RESOURCE_NOT_FOUND" - backupResourceConflictReason = "BACKUP_RESOURCE_CONFLICT" - backupFailedPrecondition = "BACKUP_FAILED_PRECONDITION" - backupAgentTimeoutReason = "BACKUP_AGENT_TIMEOUT" - backupAgentInternalReason = "BACKUP_AGENT_INTERNAL" - defaultBackupRPCTimeout = 8 * time.Second + dataManagementInvalidArgumentReason = "DATA_MANAGEMENT_INVALID_ARGUMENT" + dataManagementResourceNotFoundReason = "DATA_MANAGEMENT_RESOURCE_NOT_FOUND" + dataManagementResourceConflictReason = "DATA_MANAGEMENT_RESOURCE_CONFLICT" + dataManagementFailedPrecondition = "DATA_MANAGEMENT_FAILED_PRECONDITION" + dataManagementAgentTimeoutReason = "DATA_MANAGEMENT_AGENT_TIMEOUT" + dataManagementAgentInternalReason = "DATA_MANAGEMENT_AGENT_INTERNAL" + defaultDataManagementRPCTimeout = 8 * time.Second ) type DataManagementPostgresConfig struct { @@ -227,7 +227,7 @@ func (s *DataManagementService) UpdateConfig(ctx context.Context, cfg DataManage func (s *DataManagementService) ListSourceProfiles(ctx context.Context, sourceType string) ([]DataManagementSourceProfile, error) { sourceType = strings.TrimSpace(sourceType) if sourceType != "postgres" && sourceType != "redis" { - return nil, infraerrors.BadRequest(backupInvalidArgumentReason, "source_type must be postgres or redis") + return nil, infraerrors.BadRequest(dataManagementInvalidArgumentReason, "source_type must be postgres or redis") } var resp *backupv1.ListSourceProfilesResponse @@ -298,10 +298,10 @@ func (s *DataManagementService) DeleteSourceProfile(ctx context.Context, sourceT sourceType = strings.TrimSpace(sourceType) profileID = strings.TrimSpace(profileID) if sourceType != "postgres" && sourceType != "redis" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "source_type must be postgres or redis") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "source_type must be 
postgres or redis") } if profileID == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "profile_id is required") } return s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { @@ -317,10 +317,10 @@ func (s *DataManagementService) SetActiveSourceProfile(ctx context.Context, sour sourceType = strings.TrimSpace(sourceType) profileID = strings.TrimSpace(profileID) if sourceType != "postgres" && sourceType != "redis" { - return DataManagementSourceProfile{}, infraerrors.BadRequest(backupInvalidArgumentReason, "source_type must be postgres or redis") + return DataManagementSourceProfile{}, infraerrors.BadRequest(dataManagementInvalidArgumentReason, "source_type must be postgres or redis") } if profileID == "" { - return DataManagementSourceProfile{}, infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + return DataManagementSourceProfile{}, infraerrors.BadRequest(dataManagementInvalidArgumentReason, "profile_id is required") } var resp *backupv1.SetActiveSourceProfileResponse @@ -431,7 +431,7 @@ func (s *DataManagementService) UpdateS3Profile(ctx context.Context, input DataM func (s *DataManagementService) DeleteS3Profile(ctx context.Context, profileID string) error { profileID = strings.TrimSpace(profileID) if profileID == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "profile_id is required") } return s.withClient(ctx, func(callCtx context.Context, client backupv1.BackupServiceClient) error { @@ -443,7 +443,7 @@ func (s *DataManagementService) DeleteS3Profile(ctx context.Context, profileID s func (s *DataManagementService) SetActiveS3Profile(ctx context.Context, profileID string) (DataManagementS3Profile, error) { profileID = strings.TrimSpace(profileID) if profileID == "" { - 
return DataManagementS3Profile{}, infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + return DataManagementS3Profile{}, infraerrors.BadRequest(dataManagementInvalidArgumentReason, "profile_id is required") } var resp *backupv1.SetActiveS3ProfileResponse @@ -521,55 +521,55 @@ func (s *DataManagementService) withClient(ctx context.Context, call func(contex } socketPath := s.SocketPath() - conn, err := s.dialBackupAgent(ctx, socketPath) + conn, err := s.dialDataManagementAgent(ctx, socketPath) if err != nil { - return ErrBackupAgentUnavailable.WithMetadata(map[string]string{"socket_path": socketPath}).WithCause(err) + return ErrDataManagementAgentUnavailable.WithMetadata(map[string]string{"socket_path": socketPath}).WithCause(err) } defer func() { _ = conn.Close() }() client := backupv1.NewBackupServiceClient(conn) - callCtx, callCancel := context.WithTimeout(ctx, defaultBackupRPCTimeout) + callCtx, callCancel := context.WithTimeout(ctx, defaultDataManagementRPCTimeout) defer callCancel() if requestID := requestIDFromContext(ctx); requestID != "" { callCtx = metadata.AppendToOutgoingContext(callCtx, "x-request-id", requestID) } if err := call(callCtx, client); err != nil { - return mapBackupGRPCError(err, socketPath) + return mapDataManagementGRPCError(err, socketPath) } return nil } -func mapBackupGRPCError(err error, socketPath string) error { +func mapDataManagementGRPCError(err error, socketPath string) error { if err == nil { return nil } st, ok := grpcstatus.FromError(err) if !ok { - return infraerrors.InternalServer(backupAgentInternalReason, "backup agent call failed"). + return infraerrors.InternalServer(dataManagementAgentInternalReason, "data management agent call failed"). WithMetadata(map[string]string{"socket_path": socketPath}). 
WithCause(err) } switch st.Code() { case codes.InvalidArgument: - return infraerrors.BadRequest(backupInvalidArgumentReason, st.Message()) + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, st.Message()) case codes.AlreadyExists: - return infraerrors.New(409, backupResourceConflictReason, st.Message()) + return infraerrors.New(409, dataManagementResourceConflictReason, st.Message()) case codes.NotFound: - return infraerrors.NotFound(backupResourceNotFoundReason, st.Message()) + return infraerrors.NotFound(dataManagementResourceNotFoundReason, st.Message()) case codes.FailedPrecondition: - return infraerrors.New(412, backupFailedPrecondition, st.Message()) + return infraerrors.New(412, dataManagementFailedPrecondition, st.Message()) case codes.Unavailable: - return infraerrors.ServiceUnavailable(BackupAgentUnavailableReason, st.Message()). + return infraerrors.ServiceUnavailable(DataManagementAgentUnavailableReason, st.Message()). WithMetadata(map[string]string{"socket_path": socketPath}) case codes.DeadlineExceeded: - return infraerrors.GatewayTimeout(backupAgentTimeoutReason, st.Message()) + return infraerrors.GatewayTimeout(dataManagementAgentTimeoutReason, st.Message()) default: - return infraerrors.InternalServer(backupAgentInternalReason, st.Message()). + return infraerrors.InternalServer(dataManagementAgentInternalReason, st.Message()). 
WithMetadata(map[string]string{ "socket_path": socketPath, "grpc_code": st.Code().String(), @@ -785,42 +785,42 @@ func mapProtoJob(job *backupv1.BackupJob) DataManagementBackupJob { func validateDataManagementConfig(cfg DataManagementConfig) error { sourceMode := strings.TrimSpace(cfg.SourceMode) if sourceMode != "direct" && sourceMode != "docker_exec" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "source_mode must be direct or docker_exec") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "source_mode must be direct or docker_exec") } if strings.TrimSpace(cfg.BackupRoot) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "backup_root is required") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "backup_root is required") } if cfg.RetentionDays <= 0 { - return infraerrors.BadRequest(backupInvalidArgumentReason, "retention_days must be > 0") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "retention_days must be > 0") } if cfg.KeepLast <= 0 { - return infraerrors.BadRequest(backupInvalidArgumentReason, "keep_last must be > 0") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "keep_last must be > 0") } if strings.TrimSpace(cfg.Postgres.Database) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "postgres.database is required") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "postgres.database is required") } if cfg.Postgres.Port <= 0 { - return infraerrors.BadRequest(backupInvalidArgumentReason, "postgres.port must be > 0") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "postgres.port must be > 0") } if sourceMode == "docker_exec" { if strings.TrimSpace(cfg.Postgres.ContainerName) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "postgres.container_name is required in docker_exec mode") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, 
"postgres.container_name is required in docker_exec mode") } if strings.TrimSpace(cfg.Redis.ContainerName) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "redis.container_name is required in docker_exec mode") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "redis.container_name is required in docker_exec mode") } } else { if strings.TrimSpace(cfg.Postgres.Host) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "postgres.host is required in direct mode") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "postgres.host is required in direct mode") } if strings.TrimSpace(cfg.Redis.Addr) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "redis.addr is required in direct mode") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "redis.addr is required in direct mode") } } if cfg.Redis.DB < 0 { - return infraerrors.BadRequest(backupInvalidArgumentReason, "redis.db must be >= 0") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "redis.db must be >= 0") } if cfg.S3.Enabled { @@ -833,20 +833,20 @@ func validateDataManagementConfig(cfg DataManagementConfig) error { func validateS3Config(cfg DataManagementS3Config) error { if strings.TrimSpace(cfg.Region) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "s3.region is required") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "s3.region is required") } if strings.TrimSpace(cfg.Bucket) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "s3.bucket is required") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "s3.bucket is required") } return nil } func validateS3ProfileInput(profileID, name string, s3Cfg DataManagementS3Config) error { if strings.TrimSpace(profileID) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + return 
infraerrors.BadRequest(dataManagementInvalidArgumentReason, "profile_id is required") } if strings.TrimSpace(name) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "name is required") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "name is required") } if s3Cfg.Enabled { return validateS3Config(s3Cfg) @@ -857,20 +857,19 @@ func validateS3ProfileInput(profileID, name string, s3Cfg DataManagementS3Config func validateSourceProfileInput(sourceType, profileID, name string) error { sourceType = strings.TrimSpace(sourceType) if sourceType != "postgres" && sourceType != "redis" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "source_type must be postgres or redis") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "source_type must be postgres or redis") } if strings.TrimSpace(profileID) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "profile_id is required") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "profile_id is required") } if strings.TrimSpace(name) == "" { - return infraerrors.BadRequest(backupInvalidArgumentReason, "name is required") + return infraerrors.BadRequest(dataManagementInvalidArgumentReason, "name is required") } return nil } -func (s *DataManagementService) probeBackupHealth(ctx context.Context) (*DataManagementAgentInfo, error) { - socketPath := s.SocketPath() - conn, err := s.dialBackupAgent(ctx, socketPath) +func (s *DataManagementService) probeAgentHealth(ctx context.Context, socketPath string) (*DataManagementAgentInfo, error) { + conn, err := s.dialDataManagementAgent(ctx, socketPath) if err != nil { return nil, err } @@ -889,7 +888,7 @@ func (s *DataManagementService) probeBackupHealth(ctx context.Context) (*DataMan } statusText := strings.TrimSpace(resp.GetStatus()) if statusText == "" { - return nil, fmt.Errorf("empty backup health status") + return nil, fmt.Errorf("empty data management health status") } return 
&DataManagementAgentInfo{ Status: statusText, @@ -898,9 +897,9 @@ func (s *DataManagementService) probeBackupHealth(ctx context.Context) (*DataMan }, nil } -func (s *DataManagementService) dialBackupAgent(ctx context.Context, socketPath string) (*grpc.ClientConn, error) { +func (s *DataManagementService) dialDataManagementAgent(ctx context.Context, socketPath string) (*grpc.ClientConn, error) { conn, err := grpc.NewClient( - "passthrough:///backup-agent", + "passthrough:///datamanagement-agent", grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithContextDialer(func(dialCtx context.Context, _ string) (net.Conn, error) { dialer := net.Dialer{Timeout: s.dialTimeout} @@ -926,7 +925,7 @@ func (s *DataManagementService) dialBackupAgent(ctx context.Context, socketPath } if state == connectivity.Shutdown { _ = conn.Close() - return nil, errors.New("backup agent grpc connection shutdown") + return nil, errors.New("data management agent grpc connection shutdown") } if conn.WaitForStateChange(waitCtx, state) { continue diff --git a/backend/internal/service/data_management_grpc_test.go b/backend/internal/service/data_management_grpc_test.go index 6de435e7b..909a263c5 100644 --- a/backend/internal/service/data_management_grpc_test.go +++ b/backend/internal/service/data_management_grpc_test.go @@ -17,10 +17,10 @@ import ( grpcstatus "google.golang.org/grpc/status" ) -func TestMapBackupGRPCError(t *testing.T) { +func TestMapDataManagementGRPCError(t *testing.T) { t.Parallel() - socketPath := "/tmp/sub2api-backup.sock" + socketPath := "/tmp/sub2api-datamanagement.sock" testCases := []struct { name string err error @@ -31,25 +31,25 @@ func TestMapBackupGRPCError(t *testing.T) { name: "invalid argument", err: grpcstatus.Error(codes.InvalidArgument, "bad request"), wantCode: 400, - wantReason: backupInvalidArgumentReason, + wantReason: dataManagementInvalidArgumentReason, }, { name: "not found", err: grpcstatus.Error(codes.NotFound, "not found"), wantCode: 404, - 
wantReason: backupResourceNotFoundReason, + wantReason: dataManagementResourceNotFoundReason, }, { name: "already exists", err: grpcstatus.Error(codes.AlreadyExists, "exists"), wantCode: 409, - wantReason: backupResourceConflictReason, + wantReason: dataManagementResourceConflictReason, }, { name: "failed precondition", err: grpcstatus.Error(codes.FailedPrecondition, "precondition failed"), wantCode: 412, - wantReason: backupFailedPrecondition, + wantReason: dataManagementFailedPrecondition, }, { name: "unavailable", @@ -61,19 +61,19 @@ func TestMapBackupGRPCError(t *testing.T) { name: "deadline exceeded", err: grpcstatus.Error(codes.DeadlineExceeded, "timeout"), wantCode: 504, - wantReason: backupAgentTimeoutReason, + wantReason: dataManagementAgentTimeoutReason, }, { name: "internal fallback", err: grpcstatus.Error(codes.Internal, "internal"), wantCode: 500, - wantReason: backupAgentInternalReason, + wantReason: dataManagementAgentInternalReason, }, { name: "non grpc error", err: errors.New("plain error"), wantCode: 500, - wantReason: backupAgentInternalReason, + wantReason: dataManagementAgentInternalReason, }, } @@ -82,7 +82,7 @@ func TestMapBackupGRPCError(t *testing.T) { t.Run(tc.name, func(t *testing.T) { t.Parallel() - mapped := mapBackupGRPCError(tc.err, socketPath) + mapped := mapDataManagementGRPCError(tc.err, socketPath) statusCode, body := infraerrors.ToHTTP(mapped) require.Equal(t, tc.wantCode, statusCode) @@ -100,7 +100,7 @@ func TestValidateDataManagementConfig(t *testing.T) { valid := DataManagementConfig{ SourceMode: "direct", - BackupRoot: "/var/lib/sub2api/backups", + BackupRoot: "/var/lib/sub2api/datamanagement", RetentionDays: 7, KeepLast: 30, Postgres: DataManagementPostgresConfig{ @@ -134,7 +134,7 @@ func TestValidateDataManagementConfig(t *testing.T) { require.Error(t, validateDataManagementConfig(s3EnabledMissingBucket)) } -func TestDataManagementService_DialBackupAgent_TimeoutDisabled(t *testing.T) { +func 
TestDataManagementService_DialDataManagementAgent_TimeoutDisabled(t *testing.T) { t.Parallel() socketPath := filepath.Join("/tmp", fmt.Sprintf("s2dm0-%d.sock", time.Now().UnixNano())) @@ -145,13 +145,13 @@ func TestDataManagementService_DialBackupAgent_TimeoutDisabled(t *testing.T) { dialTimeout: 0, } - conn, err := svc.dialBackupAgent(context.Background(), socketPath) + conn, err := svc.dialDataManagementAgent(context.Background(), socketPath) require.NoError(t, err) require.NotNil(t, conn) require.NoError(t, conn.Close()) } -func TestDataManagementService_DialBackupAgent_TimeoutExceeded(t *testing.T) { +func TestDataManagementService_DialDataManagementAgent_TimeoutExceeded(t *testing.T) { t.Parallel() socketPath := filepath.Join(t.TempDir(), "missing.sock") @@ -160,7 +160,7 @@ func TestDataManagementService_DialBackupAgent_TimeoutExceeded(t *testing.T) { dialTimeout: 30 * time.Millisecond, } - conn, err := svc.dialBackupAgent(context.Background(), socketPath) + conn, err := svc.dialDataManagementAgent(context.Background(), socketPath) require.Nil(t, conn) require.Error(t, err) require.ErrorIs(t, err, context.DeadlineExceeded) diff --git a/backend/internal/service/data_management_service.go b/backend/internal/service/data_management_service.go index 1047a891c..0b2464280 100644 --- a/backend/internal/service/data_management_service.go +++ b/backend/internal/service/data_management_service.go @@ -12,21 +12,31 @@ import ( ) const ( - DefaultBackupAgentSocketPath = "/tmp/sub2api-backup.sock" + DefaultDataManagementAgentSocketPath = "/tmp/sub2api-datamanagement.sock" + LegacyBackupAgentSocketPath = "/tmp/sub2api-backup.sock" - BackupAgentSocketMissingReason = "BACKUP_AGENT_SOCKET_MISSING" - BackupAgentUnavailableReason = "BACKUP_AGENT_UNAVAILABLE" + DataManagementAgentSocketMissingReason = "DATA_MANAGEMENT_AGENT_SOCKET_MISSING" + DataManagementAgentUnavailableReason = "DATA_MANAGEMENT_AGENT_UNAVAILABLE" + + // Deprecated: keep old names for compatibility. 
+ DefaultBackupAgentSocketPath = DefaultDataManagementAgentSocketPath + BackupAgentSocketMissingReason = DataManagementAgentSocketMissingReason + BackupAgentUnavailableReason = DataManagementAgentUnavailableReason ) var ( - ErrBackupAgentSocketMissing = infraerrors.ServiceUnavailable( - BackupAgentSocketMissingReason, - "backup agent socket is missing", + ErrDataManagementAgentSocketMissing = infraerrors.ServiceUnavailable( + DataManagementAgentSocketMissingReason, + "data management agent socket is missing", ) - ErrBackupAgentUnavailable = infraerrors.ServiceUnavailable( - BackupAgentUnavailableReason, - "backup agent is unavailable", + ErrDataManagementAgentUnavailable = infraerrors.ServiceUnavailable( + DataManagementAgentUnavailableReason, + "data management agent is unavailable", ) + + // Deprecated: keep old names for compatibility. + ErrBackupAgentSocketMissing = ErrDataManagementAgentSocketMissing + ErrBackupAgentUnavailable = ErrDataManagementAgentUnavailable ) type DataManagementAgentHealth struct { @@ -48,13 +58,13 @@ type DataManagementService struct { } func NewDataManagementService() *DataManagementService { - return NewDataManagementServiceWithOptions(DefaultBackupAgentSocketPath, 500*time.Millisecond) + return NewDataManagementServiceWithOptions(DefaultDataManagementAgentSocketPath, 500*time.Millisecond) } func NewDataManagementServiceWithOptions(socketPath string, dialTimeout time.Duration) *DataManagementService { path := strings.TrimSpace(socketPath) if path == "" { - path = DefaultBackupAgentSocketPath + path = DefaultDataManagementAgentSocketPath } if dialTimeout <= 0 { dialTimeout = 500 * time.Millisecond @@ -67,23 +77,41 @@ func NewDataManagementServiceWithOptions(socketPath string, dialTimeout time.Dur func (s *DataManagementService) SocketPath() string { if s == nil || strings.TrimSpace(s.socketPath) == "" { - return DefaultBackupAgentSocketPath + return DefaultDataManagementAgentSocketPath } return s.socketPath } func (s 
*DataManagementService) GetAgentHealth(ctx context.Context) DataManagementAgentHealth { - socketPath := s.SocketPath() + primaryPath := s.SocketPath() + health := s.getAgentHealthBySocket(ctx, primaryPath) + if health.Enabled || primaryPath != DefaultDataManagementAgentSocketPath { + return health + } + + fallbackPath := strings.TrimSpace(LegacyBackupAgentSocketPath) + if fallbackPath == "" || fallbackPath == primaryPath { + return health + } + + fallback := s.getAgentHealthBySocket(ctx, fallbackPath) + if fallback.Enabled { + return fallback + } + return health +} + +func (s *DataManagementService) getAgentHealthBySocket(ctx context.Context, socketPath string) DataManagementAgentHealth { health := DataManagementAgentHealth{ Enabled: false, - Reason: BackupAgentUnavailableReason, + Reason: DataManagementAgentUnavailableReason, SocketPath: socketPath, } info, err := os.Stat(socketPath) if err != nil { if errors.Is(err, os.ErrNotExist) { - health.Reason = BackupAgentSocketMissingReason + health.Reason = DataManagementAgentSocketMissingReason } return health } @@ -98,7 +126,7 @@ func (s *DataManagementService) GetAgentHealth(ctx context.Context) DataManageme } _ = conn.Close() - agent, err := s.probeBackupHealth(ctx) + agent, err := s.probeAgentHealth(ctx, socketPath) if err != nil { return health } @@ -116,8 +144,8 @@ func (s *DataManagementService) EnsureAgentEnabled(ctx context.Context) error { } metadata := map[string]string{"socket_path": health.SocketPath} - if health.Reason == BackupAgentSocketMissingReason { - return ErrBackupAgentSocketMissing.WithMetadata(metadata) + if health.Reason == DataManagementAgentSocketMissingReason { + return ErrDataManagementAgentSocketMissing.WithMetadata(metadata) } - return ErrBackupAgentUnavailable.WithMetadata(metadata) + return ErrDataManagementAgentUnavailable.WithMetadata(metadata) } diff --git a/backend/internal/service/data_management_service_test.go b/backend/internal/service/data_management_service_test.go index 
08574fe56..27688cd93 100644 --- a/backend/internal/service/data_management_service_test.go +++ b/backend/internal/service/data_management_service_test.go @@ -22,7 +22,7 @@ func TestDataManagementService_GetAgentHealth_SocketMissing(t *testing.T) { health := svc.GetAgentHealth(context.Background()) require.False(t, health.Enabled) - require.Equal(t, BackupAgentSocketMissingReason, health.Reason) + require.Equal(t, DataManagementAgentSocketMissingReason, health.Reason) require.NotEmpty(t, health.SocketPath) } @@ -40,7 +40,7 @@ func TestDataManagementService_GetAgentHealth_SocketReachable(t *testing.T) { require.Equal(t, socketPath, health.SocketPath) require.NotNil(t, health.Agent) require.Equal(t, "SERVING", health.Agent.Status) - require.Equal(t, "test-backupd", health.Agent.Version) + require.Equal(t, "test-datamanagementd", health.Agent.Version) require.EqualValues(t, 42, health.Agent.UptimeSeconds) } @@ -53,7 +53,7 @@ func TestDataManagementService_EnsureAgentEnabled(t *testing.T) { statusCode, status := infraerrors.ToHTTP(err) require.Equal(t, 503, statusCode) - require.Equal(t, BackupAgentSocketMissingReason, status.Reason) + require.Equal(t, DataManagementAgentSocketMissingReason, status.Reason) require.Equal(t, svc.SocketPath(), status.Metadata["socket_path"]) } @@ -85,7 +85,7 @@ type testBackupHealthServer struct { func (s *testBackupHealthServer) Health(context.Context, *backupv1.HealthRequest) (*backupv1.HealthResponse, error) { return &backupv1.HealthResponse{ Status: "SERVING", - Version: "test-backupd", + Version: "test-datamanagementd", UptimeSeconds: 42, }, nil } diff --git a/backup/README.md b/backup/README.md deleted file mode 100644 index 4c9845239..000000000 --- a/backup/README.md +++ /dev/null @@ -1,49 +0,0 @@ -# backupd 备份进程 - -`backupd` 是运行在宿主机上的备份执行进程,负责: - -- 接收主进程通过 gRPC Unix Socket 发起的备份管理请求 -- 执行 PostgreSQL / Redis / Full 备份任务 -- 将备份产物可选上传到标准 S3(`aws-sdk-go-v2`) -- 使用 Ent + SQLite 持久化备份配置与任务状态 - -## 1. 
本地构建 - -```bash -cd backup -go build -o backupd ./cmd/backupd -``` - -## 2. 本地运行 - -```bash -cd backup -./backupd \ - -socket-path /tmp/sub2api-backup.sock \ - -sqlite-path /tmp/sub2api-backupd.db \ - -version dev -``` - -默认参数: - -- `-socket-path`: `/tmp/sub2api-backup.sock` -- `-sqlite-path`: `/tmp/sub2api-backupd.db` -- `-version`: `dev` - -## 3. 依赖要求 - -- PostgreSQL 客户端:`pg_dump` -- Redis 客户端:`redis-cli` -- 若使用 `docker_exec` 源模式:`docker` - -## 4. 与主进程协作要求 - -- 主进程固定探测 `/tmp/sub2api-backup.sock` -- 只有探测到该 UDS 且 `Health` 成功时,管理后台“数据管理”功能才会启用 -- `backupd` 本身不做业务鉴权,依赖主进程管理员鉴权 + UDS 文件权限 - -## 5. 生产建议 - -- 使用 `systemd` 托管进程(参考 `deploy/sub2api-backupd.service`) -- 建议 `backupd` 与 `sub2api` 在同一宿主机运行 -- 若 `sub2api` 在 Docker 容器内,需把宿主机 `/tmp/sub2api-backup.sock` 挂载到容器内同路径 diff --git a/backup/cmd/backupd/main.go b/backup/cmd/backupd/main.go deleted file mode 100644 index 8597b90c9..000000000 --- a/backup/cmd/backupd/main.go +++ /dev/null @@ -1,114 +0,0 @@ -package main - -import ( - "context" - "errors" - "flag" - "log" - "net" - "os" - "os/signal" - "strings" - "syscall" - "time" - - "github.com/Wei-Shaw/sub2api/backup/internal/executor" - "github.com/Wei-Shaw/sub2api/backup/internal/grpcserver" - "github.com/Wei-Shaw/sub2api/backup/internal/store/entstore" - backupv1 "github.com/Wei-Shaw/sub2api/backup/proto/backup/v1" - "google.golang.org/grpc" - "google.golang.org/grpc/health" - healthpb "google.golang.org/grpc/health/grpc_health_v1" -) - -func main() { - socketPath := flag.String("socket-path", "/tmp/sub2api-backup.sock", "backupd unix socket path") - sqlitePath := flag.String("sqlite-path", "/tmp/sub2api-backupd.db", "backupd sqlite database path") - version := flag.String("version", "dev", "backupd version") - flag.Parse() - - if err := run(strings.TrimSpace(*socketPath), strings.TrimSpace(*sqlitePath), strings.TrimSpace(*version)); err != nil { - log.Fatalf("backupd start failed: %v", err) - } -} - -func run(socketPath, sqlitePath, version string) error { - if 
socketPath == "" { - socketPath = "/tmp/sub2api-backup.sock" - } - if sqlitePath == "" { - sqlitePath = "/tmp/sub2api-backupd.db" - } - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - store, err := entstore.Open(ctx, sqlitePath) - if err != nil { - return err - } - defer func() { - _ = store.Close() - }() - - runner := executor.NewRunner(store, executor.Options{Logger: log.Default()}) - if err := runner.Start(); err != nil { - return err - } - defer func() { - stopCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - _ = runner.Stop(stopCtx) - }() - - if err := os.Remove(socketPath); err != nil && !errors.Is(err, os.ErrNotExist) { - return err - } - - listener, err := net.Listen("unix", socketPath) - if err != nil { - return err - } - defer func() { - _ = listener.Close() - _ = os.Remove(socketPath) - }() - - if err := os.Chmod(socketPath, 0o660); err != nil { - return err - } - - grpcServer := grpc.NewServer(grpc.UnaryInterceptor(grpcserver.UnaryServerInterceptor(log.Default()))) - healthServer := health.NewServer() - healthpb.RegisterHealthServer(grpcServer, healthServer) - healthServer.SetServingStatus("", healthpb.HealthCheckResponse_SERVING) - healthServer.SetServingStatus("backup.v1.BackupService", healthpb.HealthCheckResponse_SERVING) - backupv1.RegisterBackupServiceServer(grpcServer, grpcserver.New(store, version, runner)) - - errCh := make(chan error, 1) - go func() { - log.Printf("backupd listening on %s", socketPath) - errCh <- grpcServer.Serve(listener) - }() - - sigCh := make(chan os.Signal, 1) - signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM) - - select { - case sig := <-sigCh: - log.Printf("backupd shutting down, signal=%s", sig.String()) - stopped := make(chan struct{}) - go func() { - grpcServer.GracefulStop() - close(stopped) - }() - select { - case <-stopped: - return nil - case <-time.After(5 * time.Second): - grpcServer.Stop() - return nil - } - case err := 
<-errCh: - return err - } -} diff --git a/backup/ent/backupjob.go b/backup/ent/backupjob.go deleted file mode 100644 index e18839be3..000000000 --- a/backup/ent/backupjob.go +++ /dev/null @@ -1,352 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "fmt" - "strings" - "time" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" -) - -// BackupJob is the model entity for the BackupJob schema. -type BackupJob struct { - config `json:"-"` - // ID of the ent. - ID int `json:"id,omitempty"` - // JobID holds the value of the "job_id" field. - JobID string `json:"job_id,omitempty"` - // BackupType holds the value of the "backup_type" field. - BackupType backupjob.BackupType `json:"backup_type,omitempty"` - // Status holds the value of the "status" field. - Status backupjob.Status `json:"status,omitempty"` - // TriggeredBy holds the value of the "triggered_by" field. - TriggeredBy string `json:"triggered_by,omitempty"` - // IdempotencyKey holds the value of the "idempotency_key" field. - IdempotencyKey string `json:"idempotency_key,omitempty"` - // UploadToS3 holds the value of the "upload_to_s3" field. - UploadToS3 bool `json:"upload_to_s3,omitempty"` - // S3ProfileID holds the value of the "s3_profile_id" field. - S3ProfileID string `json:"s3_profile_id,omitempty"` - // PostgresProfileID holds the value of the "postgres_profile_id" field. - PostgresProfileID string `json:"postgres_profile_id,omitempty"` - // RedisProfileID holds the value of the "redis_profile_id" field. - RedisProfileID string `json:"redis_profile_id,omitempty"` - // StartedAt holds the value of the "started_at" field. - StartedAt *time.Time `json:"started_at,omitempty"` - // FinishedAt holds the value of the "finished_at" field. - FinishedAt *time.Time `json:"finished_at,omitempty"` - // ErrorMessage holds the value of the "error_message" field. 
- ErrorMessage string `json:"error_message,omitempty"` - // ArtifactLocalPath holds the value of the "artifact_local_path" field. - ArtifactLocalPath string `json:"artifact_local_path,omitempty"` - // ArtifactSizeBytes holds the value of the "artifact_size_bytes" field. - ArtifactSizeBytes *int64 `json:"artifact_size_bytes,omitempty"` - // ArtifactSha256 holds the value of the "artifact_sha256" field. - ArtifactSha256 string `json:"artifact_sha256,omitempty"` - // S3Bucket holds the value of the "s3_bucket" field. - S3Bucket string `json:"s3_bucket,omitempty"` - // S3Key holds the value of the "s3_key" field. - S3Key string `json:"s3_key,omitempty"` - // S3Etag holds the value of the "s3_etag" field. - S3Etag string `json:"s3_etag,omitempty"` - // CreatedAt holds the value of the "created_at" field. - CreatedAt time.Time `json:"created_at,omitempty"` - // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt time.Time `json:"updated_at,omitempty"` - // Edges holds the relations/edges for other nodes in the graph. - // The values are being populated by the BackupJobQuery when eager-loading is set. - Edges BackupJobEdges `json:"edges"` - selectValues sql.SelectValues -} - -// BackupJobEdges holds the relations/edges for other nodes in the graph. -type BackupJobEdges struct { - // Events holds the value of the events edge. - Events []*BackupJobEvent `json:"events,omitempty"` - // loadedTypes holds the information for reporting if a - // type was loaded (or requested) in eager-loading or not. - loadedTypes [1]bool -} - -// EventsOrErr returns the Events value or an error if the edge -// was not loaded in eager-loading. -func (e BackupJobEdges) EventsOrErr() ([]*BackupJobEvent, error) { - if e.loadedTypes[0] { - return e.Events, nil - } - return nil, &NotLoadedError{edge: "events"} -} - -// scanValues returns the types for scanning values from sql.Rows. 
-func (*BackupJob) scanValues(columns []string) ([]any, error) { - values := make([]any, len(columns)) - for i := range columns { - switch columns[i] { - case backupjob.FieldUploadToS3: - values[i] = new(sql.NullBool) - case backupjob.FieldID, backupjob.FieldArtifactSizeBytes: - values[i] = new(sql.NullInt64) - case backupjob.FieldJobID, backupjob.FieldBackupType, backupjob.FieldStatus, backupjob.FieldTriggeredBy, backupjob.FieldIdempotencyKey, backupjob.FieldS3ProfileID, backupjob.FieldPostgresProfileID, backupjob.FieldRedisProfileID, backupjob.FieldErrorMessage, backupjob.FieldArtifactLocalPath, backupjob.FieldArtifactSha256, backupjob.FieldS3Bucket, backupjob.FieldS3Key, backupjob.FieldS3Etag: - values[i] = new(sql.NullString) - case backupjob.FieldStartedAt, backupjob.FieldFinishedAt, backupjob.FieldCreatedAt, backupjob.FieldUpdatedAt: - values[i] = new(sql.NullTime) - default: - values[i] = new(sql.UnknownType) - } - } - return values, nil -} - -// assignValues assigns the values that were returned from sql.Rows (after scanning) -// to the BackupJob fields. 
-func (_m *BackupJob) assignValues(columns []string, values []any) error { - if m, n := len(values), len(columns); m < n { - return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) - } - for i := range columns { - switch columns[i] { - case backupjob.FieldID: - value, ok := values[i].(*sql.NullInt64) - if !ok { - return fmt.Errorf("unexpected type %T for field id", value) - } - _m.ID = int(value.Int64) - case backupjob.FieldJobID: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field job_id", values[i]) - } else if value.Valid { - _m.JobID = value.String - } - case backupjob.FieldBackupType: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field backup_type", values[i]) - } else if value.Valid { - _m.BackupType = backupjob.BackupType(value.String) - } - case backupjob.FieldStatus: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field status", values[i]) - } else if value.Valid { - _m.Status = backupjob.Status(value.String) - } - case backupjob.FieldTriggeredBy: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field triggered_by", values[i]) - } else if value.Valid { - _m.TriggeredBy = value.String - } - case backupjob.FieldIdempotencyKey: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field idempotency_key", values[i]) - } else if value.Valid { - _m.IdempotencyKey = value.String - } - case backupjob.FieldUploadToS3: - if value, ok := values[i].(*sql.NullBool); !ok { - return fmt.Errorf("unexpected type %T for field upload_to_s3", values[i]) - } else if value.Valid { - _m.UploadToS3 = value.Bool - } - case backupjob.FieldS3ProfileID: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field s3_profile_id", values[i]) - } else if value.Valid { - _m.S3ProfileID = 
value.String - } - case backupjob.FieldPostgresProfileID: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field postgres_profile_id", values[i]) - } else if value.Valid { - _m.PostgresProfileID = value.String - } - case backupjob.FieldRedisProfileID: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field redis_profile_id", values[i]) - } else if value.Valid { - _m.RedisProfileID = value.String - } - case backupjob.FieldStartedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field started_at", values[i]) - } else if value.Valid { - _m.StartedAt = new(time.Time) - *_m.StartedAt = value.Time - } - case backupjob.FieldFinishedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field finished_at", values[i]) - } else if value.Valid { - _m.FinishedAt = new(time.Time) - *_m.FinishedAt = value.Time - } - case backupjob.FieldErrorMessage: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field error_message", values[i]) - } else if value.Valid { - _m.ErrorMessage = value.String - } - case backupjob.FieldArtifactLocalPath: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field artifact_local_path", values[i]) - } else if value.Valid { - _m.ArtifactLocalPath = value.String - } - case backupjob.FieldArtifactSizeBytes: - if value, ok := values[i].(*sql.NullInt64); !ok { - return fmt.Errorf("unexpected type %T for field artifact_size_bytes", values[i]) - } else if value.Valid { - _m.ArtifactSizeBytes = new(int64) - *_m.ArtifactSizeBytes = value.Int64 - } - case backupjob.FieldArtifactSha256: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field artifact_sha256", values[i]) - } else if value.Valid { - _m.ArtifactSha256 = value.String - } - case 
backupjob.FieldS3Bucket: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field s3_bucket", values[i]) - } else if value.Valid { - _m.S3Bucket = value.String - } - case backupjob.FieldS3Key: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field s3_key", values[i]) - } else if value.Valid { - _m.S3Key = value.String - } - case backupjob.FieldS3Etag: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field s3_etag", values[i]) - } else if value.Valid { - _m.S3Etag = value.String - } - case backupjob.FieldCreatedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field created_at", values[i]) - } else if value.Valid { - _m.CreatedAt = value.Time - } - case backupjob.FieldUpdatedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field updated_at", values[i]) - } else if value.Valid { - _m.UpdatedAt = value.Time - } - default: - _m.selectValues.Set(columns[i], values[i]) - } - } - return nil -} - -// Value returns the ent.Value that was dynamically selected and assigned to the BackupJob. -// This includes values selected through modifiers, order, etc. -func (_m *BackupJob) Value(name string) (ent.Value, error) { - return _m.selectValues.Get(name) -} - -// QueryEvents queries the "events" edge of the BackupJob entity. -func (_m *BackupJob) QueryEvents() *BackupJobEventQuery { - return NewBackupJobClient(_m.config).QueryEvents(_m) -} - -// Update returns a builder for updating this BackupJob. -// Note that you need to call BackupJob.Unwrap() before calling this method if this BackupJob -// was returned from a transaction, and the transaction was committed or rolled back. 
-func (_m *BackupJob) Update() *BackupJobUpdateOne { - return NewBackupJobClient(_m.config).UpdateOne(_m) -} - -// Unwrap unwraps the BackupJob entity that was returned from a transaction after it was closed, -// so that all future queries will be executed through the driver which created the transaction. -func (_m *BackupJob) Unwrap() *BackupJob { - _tx, ok := _m.config.driver.(*txDriver) - if !ok { - panic("ent: BackupJob is not a transactional entity") - } - _m.config.driver = _tx.drv - return _m -} - -// String implements the fmt.Stringer. -func (_m *BackupJob) String() string { - var builder strings.Builder - builder.WriteString("BackupJob(") - builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) - builder.WriteString("job_id=") - builder.WriteString(_m.JobID) - builder.WriteString(", ") - builder.WriteString("backup_type=") - builder.WriteString(fmt.Sprintf("%v", _m.BackupType)) - builder.WriteString(", ") - builder.WriteString("status=") - builder.WriteString(fmt.Sprintf("%v", _m.Status)) - builder.WriteString(", ") - builder.WriteString("triggered_by=") - builder.WriteString(_m.TriggeredBy) - builder.WriteString(", ") - builder.WriteString("idempotency_key=") - builder.WriteString(_m.IdempotencyKey) - builder.WriteString(", ") - builder.WriteString("upload_to_s3=") - builder.WriteString(fmt.Sprintf("%v", _m.UploadToS3)) - builder.WriteString(", ") - builder.WriteString("s3_profile_id=") - builder.WriteString(_m.S3ProfileID) - builder.WriteString(", ") - builder.WriteString("postgres_profile_id=") - builder.WriteString(_m.PostgresProfileID) - builder.WriteString(", ") - builder.WriteString("redis_profile_id=") - builder.WriteString(_m.RedisProfileID) - builder.WriteString(", ") - if v := _m.StartedAt; v != nil { - builder.WriteString("started_at=") - builder.WriteString(v.Format(time.ANSIC)) - } - builder.WriteString(", ") - if v := _m.FinishedAt; v != nil { - builder.WriteString("finished_at=") - builder.WriteString(v.Format(time.ANSIC)) - } - 
builder.WriteString(", ") - builder.WriteString("error_message=") - builder.WriteString(_m.ErrorMessage) - builder.WriteString(", ") - builder.WriteString("artifact_local_path=") - builder.WriteString(_m.ArtifactLocalPath) - builder.WriteString(", ") - if v := _m.ArtifactSizeBytes; v != nil { - builder.WriteString("artifact_size_bytes=") - builder.WriteString(fmt.Sprintf("%v", *v)) - } - builder.WriteString(", ") - builder.WriteString("artifact_sha256=") - builder.WriteString(_m.ArtifactSha256) - builder.WriteString(", ") - builder.WriteString("s3_bucket=") - builder.WriteString(_m.S3Bucket) - builder.WriteString(", ") - builder.WriteString("s3_key=") - builder.WriteString(_m.S3Key) - builder.WriteString(", ") - builder.WriteString("s3_etag=") - builder.WriteString(_m.S3Etag) - builder.WriteString(", ") - builder.WriteString("created_at=") - builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) - builder.WriteString(", ") - builder.WriteString("updated_at=") - builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) - builder.WriteByte(')') - return builder.String() -} - -// BackupJobs is a parsable slice of BackupJob. -type BackupJobs []*BackupJob diff --git a/backup/ent/backupjob/backupjob.go b/backup/ent/backupjob/backupjob.go deleted file mode 100644 index 1f759a8a6..000000000 --- a/backup/ent/backupjob/backupjob.go +++ /dev/null @@ -1,299 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package backupjob - -import ( - "fmt" - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" -) - -const ( - // Label holds the string label denoting the backupjob type in the database. - Label = "backup_job" - // FieldID holds the string denoting the id field in the database. - FieldID = "id" - // FieldJobID holds the string denoting the job_id field in the database. - FieldJobID = "job_id" - // FieldBackupType holds the string denoting the backup_type field in the database. 
- FieldBackupType = "backup_type" - // FieldStatus holds the string denoting the status field in the database. - FieldStatus = "status" - // FieldTriggeredBy holds the string denoting the triggered_by field in the database. - FieldTriggeredBy = "triggered_by" - // FieldIdempotencyKey holds the string denoting the idempotency_key field in the database. - FieldIdempotencyKey = "idempotency_key" - // FieldUploadToS3 holds the string denoting the upload_to_s3 field in the database. - FieldUploadToS3 = "upload_to_s3" - // FieldS3ProfileID holds the string denoting the s3_profile_id field in the database. - FieldS3ProfileID = "s3_profile_id" - // FieldPostgresProfileID holds the string denoting the postgres_profile_id field in the database. - FieldPostgresProfileID = "postgres_profile_id" - // FieldRedisProfileID holds the string denoting the redis_profile_id field in the database. - FieldRedisProfileID = "redis_profile_id" - // FieldStartedAt holds the string denoting the started_at field in the database. - FieldStartedAt = "started_at" - // FieldFinishedAt holds the string denoting the finished_at field in the database. - FieldFinishedAt = "finished_at" - // FieldErrorMessage holds the string denoting the error_message field in the database. - FieldErrorMessage = "error_message" - // FieldArtifactLocalPath holds the string denoting the artifact_local_path field in the database. - FieldArtifactLocalPath = "artifact_local_path" - // FieldArtifactSizeBytes holds the string denoting the artifact_size_bytes field in the database. - FieldArtifactSizeBytes = "artifact_size_bytes" - // FieldArtifactSha256 holds the string denoting the artifact_sha256 field in the database. - FieldArtifactSha256 = "artifact_sha256" - // FieldS3Bucket holds the string denoting the s3_bucket field in the database. - FieldS3Bucket = "s3_bucket" - // FieldS3Key holds the string denoting the s3_key field in the database. 
- FieldS3Key = "s3_key" - // FieldS3Etag holds the string denoting the s3_etag field in the database. - FieldS3Etag = "s3_etag" - // FieldCreatedAt holds the string denoting the created_at field in the database. - FieldCreatedAt = "created_at" - // FieldUpdatedAt holds the string denoting the updated_at field in the database. - FieldUpdatedAt = "updated_at" - // EdgeEvents holds the string denoting the events edge name in mutations. - EdgeEvents = "events" - // Table holds the table name of the backupjob in the database. - Table = "backup_jobs" - // EventsTable is the table that holds the events relation/edge. - EventsTable = "backup_job_events" - // EventsInverseTable is the table name for the BackupJobEvent entity. - // It exists in this package in order to avoid circular dependency with the "backupjobevent" package. - EventsInverseTable = "backup_job_events" - // EventsColumn is the table column denoting the events relation/edge. - EventsColumn = "backup_job_id" -) - -// Columns holds all SQL columns for backupjob fields. -var Columns = []string{ - FieldID, - FieldJobID, - FieldBackupType, - FieldStatus, - FieldTriggeredBy, - FieldIdempotencyKey, - FieldUploadToS3, - FieldS3ProfileID, - FieldPostgresProfileID, - FieldRedisProfileID, - FieldStartedAt, - FieldFinishedAt, - FieldErrorMessage, - FieldArtifactLocalPath, - FieldArtifactSizeBytes, - FieldArtifactSha256, - FieldS3Bucket, - FieldS3Key, - FieldS3Etag, - FieldCreatedAt, - FieldUpdatedAt, -} - -// ValidColumn reports if the column name is valid (part of the table columns). -func ValidColumn(column string) bool { - for i := range Columns { - if column == Columns[i] { - return true - } - } - return false -} - -var ( - // DefaultTriggeredBy holds the default value on creation for the "triggered_by" field. - DefaultTriggeredBy string - // DefaultUploadToS3 holds the default value on creation for the "upload_to_s3" field. 
- DefaultUploadToS3 bool - // DefaultCreatedAt holds the default value on creation for the "created_at" field. - DefaultCreatedAt func() time.Time - // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. - DefaultUpdatedAt func() time.Time - // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. - UpdateDefaultUpdatedAt func() time.Time -) - -// BackupType defines the type for the "backup_type" enum field. -type BackupType string - -// BackupType values. -const ( - BackupTypePostgres BackupType = "postgres" - BackupTypeRedis BackupType = "redis" - BackupTypeFull BackupType = "full" -) - -func (bt BackupType) String() string { - return string(bt) -} - -// BackupTypeValidator is a validator for the "backup_type" field enum values. It is called by the builders before save. -func BackupTypeValidator(bt BackupType) error { - switch bt { - case BackupTypePostgres, BackupTypeRedis, BackupTypeFull: - return nil - default: - return fmt.Errorf("backupjob: invalid enum value for backup_type field: %q", bt) - } -} - -// Status defines the type for the "status" enum field. -type Status string - -// StatusQueued is the default value of the Status enum. -const DefaultStatus = StatusQueued - -// Status values. -const ( - StatusQueued Status = "queued" - StatusRunning Status = "running" - StatusSucceeded Status = "succeeded" - StatusFailed Status = "failed" - StatusPartialSucceeded Status = "partial_succeeded" -) - -func (s Status) String() string { - return string(s) -} - -// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save. -func StatusValidator(s Status) error { - switch s { - case StatusQueued, StatusRunning, StatusSucceeded, StatusFailed, StatusPartialSucceeded: - return nil - default: - return fmt.Errorf("backupjob: invalid enum value for status field: %q", s) - } -} - -// OrderOption defines the ordering options for the BackupJob queries. 
-type OrderOption func(*sql.Selector) - -// ByID orders the results by the id field. -func ByID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldID, opts...).ToFunc() -} - -// ByJobID orders the results by the job_id field. -func ByJobID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldJobID, opts...).ToFunc() -} - -// ByBackupType orders the results by the backup_type field. -func ByBackupType(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldBackupType, opts...).ToFunc() -} - -// ByStatus orders the results by the status field. -func ByStatus(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldStatus, opts...).ToFunc() -} - -// ByTriggeredBy orders the results by the triggered_by field. -func ByTriggeredBy(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldTriggeredBy, opts...).ToFunc() -} - -// ByIdempotencyKey orders the results by the idempotency_key field. -func ByIdempotencyKey(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldIdempotencyKey, opts...).ToFunc() -} - -// ByUploadToS3 orders the results by the upload_to_s3 field. -func ByUploadToS3(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldUploadToS3, opts...).ToFunc() -} - -// ByS3ProfileID orders the results by the s3_profile_id field. -func ByS3ProfileID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldS3ProfileID, opts...).ToFunc() -} - -// ByPostgresProfileID orders the results by the postgres_profile_id field. -func ByPostgresProfileID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldPostgresProfileID, opts...).ToFunc() -} - -// ByRedisProfileID orders the results by the redis_profile_id field. 
-func ByRedisProfileID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldRedisProfileID, opts...).ToFunc() -} - -// ByStartedAt orders the results by the started_at field. -func ByStartedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldStartedAt, opts...).ToFunc() -} - -// ByFinishedAt orders the results by the finished_at field. -func ByFinishedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldFinishedAt, opts...).ToFunc() -} - -// ByErrorMessage orders the results by the error_message field. -func ByErrorMessage(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldErrorMessage, opts...).ToFunc() -} - -// ByArtifactLocalPath orders the results by the artifact_local_path field. -func ByArtifactLocalPath(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldArtifactLocalPath, opts...).ToFunc() -} - -// ByArtifactSizeBytes orders the results by the artifact_size_bytes field. -func ByArtifactSizeBytes(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldArtifactSizeBytes, opts...).ToFunc() -} - -// ByArtifactSha256 orders the results by the artifact_sha256 field. -func ByArtifactSha256(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldArtifactSha256, opts...).ToFunc() -} - -// ByS3Bucket orders the results by the s3_bucket field. -func ByS3Bucket(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldS3Bucket, opts...).ToFunc() -} - -// ByS3Key orders the results by the s3_key field. -func ByS3Key(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldS3Key, opts...).ToFunc() -} - -// ByS3Etag orders the results by the s3_etag field. -func ByS3Etag(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldS3Etag, opts...).ToFunc() -} - -// ByCreatedAt orders the results by the created_at field. 
-func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() -} - -// ByUpdatedAt orders the results by the updated_at field. -func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() -} - -// ByEventsCount orders the results by events count. -func ByEventsCount(opts ...sql.OrderTermOption) OrderOption { - return func(s *sql.Selector) { - sqlgraph.OrderByNeighborsCount(s, newEventsStep(), opts...) - } -} - -// ByEvents orders the results by events terms. -func ByEvents(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { - return func(s *sql.Selector) { - sqlgraph.OrderByNeighborTerms(s, newEventsStep(), append([]sql.OrderTerm{term}, terms...)...) - } -} -func newEventsStep() *sqlgraph.Step { - return sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(EventsInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, EventsTable, EventsColumn), - ) -} diff --git a/backup/ent/backupjob/where.go b/backup/ent/backupjob/where.go deleted file mode 100644 index 816399de2..000000000 --- a/backup/ent/backupjob/where.go +++ /dev/null @@ -1,1344 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package backupjob - -import ( - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// ID filters vertices based on their ID field. -func ID(id int) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldID, id)) -} - -// IDEQ applies the EQ predicate on the ID field. -func IDEQ(id int) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldID, id)) -} - -// IDNEQ applies the NEQ predicate on the ID field. -func IDNEQ(id int) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldID, id)) -} - -// IDIn applies the In predicate on the ID field. 
-func IDIn(ids ...int) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldID, ids...)) -} - -// IDNotIn applies the NotIn predicate on the ID field. -func IDNotIn(ids ...int) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldID, ids...)) -} - -// IDGT applies the GT predicate on the ID field. -func IDGT(id int) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldID, id)) -} - -// IDGTE applies the GTE predicate on the ID field. -func IDGTE(id int) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldID, id)) -} - -// IDLT applies the LT predicate on the ID field. -func IDLT(id int) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldID, id)) -} - -// IDLTE applies the LTE predicate on the ID field. -func IDLTE(id int) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldID, id)) -} - -// JobID applies equality check predicate on the "job_id" field. It's identical to JobIDEQ. -func JobID(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldJobID, v)) -} - -// TriggeredBy applies equality check predicate on the "triggered_by" field. It's identical to TriggeredByEQ. -func TriggeredBy(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldTriggeredBy, v)) -} - -// IdempotencyKey applies equality check predicate on the "idempotency_key" field. It's identical to IdempotencyKeyEQ. -func IdempotencyKey(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldIdempotencyKey, v)) -} - -// UploadToS3 applies equality check predicate on the "upload_to_s3" field. It's identical to UploadToS3EQ. -func UploadToS3(v bool) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldUploadToS3, v)) -} - -// S3ProfileID applies equality check predicate on the "s3_profile_id" field. It's identical to S3ProfileIDEQ. 
-func S3ProfileID(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldS3ProfileID, v)) -} - -// PostgresProfileID applies equality check predicate on the "postgres_profile_id" field. It's identical to PostgresProfileIDEQ. -func PostgresProfileID(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldPostgresProfileID, v)) -} - -// RedisProfileID applies equality check predicate on the "redis_profile_id" field. It's identical to RedisProfileIDEQ. -func RedisProfileID(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldRedisProfileID, v)) -} - -// StartedAt applies equality check predicate on the "started_at" field. It's identical to StartedAtEQ. -func StartedAt(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldStartedAt, v)) -} - -// FinishedAt applies equality check predicate on the "finished_at" field. It's identical to FinishedAtEQ. -func FinishedAt(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldFinishedAt, v)) -} - -// ErrorMessage applies equality check predicate on the "error_message" field. It's identical to ErrorMessageEQ. -func ErrorMessage(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldErrorMessage, v)) -} - -// ArtifactLocalPath applies equality check predicate on the "artifact_local_path" field. It's identical to ArtifactLocalPathEQ. -func ArtifactLocalPath(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldArtifactLocalPath, v)) -} - -// ArtifactSizeBytes applies equality check predicate on the "artifact_size_bytes" field. It's identical to ArtifactSizeBytesEQ. -func ArtifactSizeBytes(v int64) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldArtifactSizeBytes, v)) -} - -// ArtifactSha256 applies equality check predicate on the "artifact_sha256" field. It's identical to ArtifactSha256EQ. 
-func ArtifactSha256(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldArtifactSha256, v)) -} - -// S3Bucket applies equality check predicate on the "s3_bucket" field. It's identical to S3BucketEQ. -func S3Bucket(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldS3Bucket, v)) -} - -// S3Key applies equality check predicate on the "s3_key" field. It's identical to S3KeyEQ. -func S3Key(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldS3Key, v)) -} - -// S3Etag applies equality check predicate on the "s3_etag" field. It's identical to S3EtagEQ. -func S3Etag(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldS3Etag, v)) -} - -// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. -func CreatedAt(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldCreatedAt, v)) -} - -// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. -func UpdatedAt(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldUpdatedAt, v)) -} - -// JobIDEQ applies the EQ predicate on the "job_id" field. -func JobIDEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldJobID, v)) -} - -// JobIDNEQ applies the NEQ predicate on the "job_id" field. -func JobIDNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldJobID, v)) -} - -// JobIDIn applies the In predicate on the "job_id" field. -func JobIDIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldJobID, vs...)) -} - -// JobIDNotIn applies the NotIn predicate on the "job_id" field. -func JobIDNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldJobID, vs...)) -} - -// JobIDGT applies the GT predicate on the "job_id" field. 
-func JobIDGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldJobID, v)) -} - -// JobIDGTE applies the GTE predicate on the "job_id" field. -func JobIDGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldJobID, v)) -} - -// JobIDLT applies the LT predicate on the "job_id" field. -func JobIDLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldJobID, v)) -} - -// JobIDLTE applies the LTE predicate on the "job_id" field. -func JobIDLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldJobID, v)) -} - -// JobIDContains applies the Contains predicate on the "job_id" field. -func JobIDContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldJobID, v)) -} - -// JobIDHasPrefix applies the HasPrefix predicate on the "job_id" field. -func JobIDHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldJobID, v)) -} - -// JobIDHasSuffix applies the HasSuffix predicate on the "job_id" field. -func JobIDHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldJobID, v)) -} - -// JobIDEqualFold applies the EqualFold predicate on the "job_id" field. -func JobIDEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldJobID, v)) -} - -// JobIDContainsFold applies the ContainsFold predicate on the "job_id" field. -func JobIDContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldJobID, v)) -} - -// BackupTypeEQ applies the EQ predicate on the "backup_type" field. -func BackupTypeEQ(v BackupType) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldBackupType, v)) -} - -// BackupTypeNEQ applies the NEQ predicate on the "backup_type" field. 
-func BackupTypeNEQ(v BackupType) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldBackupType, v)) -} - -// BackupTypeIn applies the In predicate on the "backup_type" field. -func BackupTypeIn(vs ...BackupType) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldBackupType, vs...)) -} - -// BackupTypeNotIn applies the NotIn predicate on the "backup_type" field. -func BackupTypeNotIn(vs ...BackupType) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldBackupType, vs...)) -} - -// StatusEQ applies the EQ predicate on the "status" field. -func StatusEQ(v Status) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldStatus, v)) -} - -// StatusNEQ applies the NEQ predicate on the "status" field. -func StatusNEQ(v Status) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldStatus, v)) -} - -// StatusIn applies the In predicate on the "status" field. -func StatusIn(vs ...Status) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldStatus, vs...)) -} - -// StatusNotIn applies the NotIn predicate on the "status" field. -func StatusNotIn(vs ...Status) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldStatus, vs...)) -} - -// TriggeredByEQ applies the EQ predicate on the "triggered_by" field. -func TriggeredByEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldTriggeredBy, v)) -} - -// TriggeredByNEQ applies the NEQ predicate on the "triggered_by" field. -func TriggeredByNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldTriggeredBy, v)) -} - -// TriggeredByIn applies the In predicate on the "triggered_by" field. -func TriggeredByIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldTriggeredBy, vs...)) -} - -// TriggeredByNotIn applies the NotIn predicate on the "triggered_by" field. 
-func TriggeredByNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldTriggeredBy, vs...)) -} - -// TriggeredByGT applies the GT predicate on the "triggered_by" field. -func TriggeredByGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldTriggeredBy, v)) -} - -// TriggeredByGTE applies the GTE predicate on the "triggered_by" field. -func TriggeredByGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldTriggeredBy, v)) -} - -// TriggeredByLT applies the LT predicate on the "triggered_by" field. -func TriggeredByLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldTriggeredBy, v)) -} - -// TriggeredByLTE applies the LTE predicate on the "triggered_by" field. -func TriggeredByLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldTriggeredBy, v)) -} - -// TriggeredByContains applies the Contains predicate on the "triggered_by" field. -func TriggeredByContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldTriggeredBy, v)) -} - -// TriggeredByHasPrefix applies the HasPrefix predicate on the "triggered_by" field. -func TriggeredByHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldTriggeredBy, v)) -} - -// TriggeredByHasSuffix applies the HasSuffix predicate on the "triggered_by" field. -func TriggeredByHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldTriggeredBy, v)) -} - -// TriggeredByEqualFold applies the EqualFold predicate on the "triggered_by" field. -func TriggeredByEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldTriggeredBy, v)) -} - -// TriggeredByContainsFold applies the ContainsFold predicate on the "triggered_by" field. 
-func TriggeredByContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldTriggeredBy, v)) -} - -// IdempotencyKeyEQ applies the EQ predicate on the "idempotency_key" field. -func IdempotencyKeyEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldIdempotencyKey, v)) -} - -// IdempotencyKeyNEQ applies the NEQ predicate on the "idempotency_key" field. -func IdempotencyKeyNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldIdempotencyKey, v)) -} - -// IdempotencyKeyIn applies the In predicate on the "idempotency_key" field. -func IdempotencyKeyIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldIdempotencyKey, vs...)) -} - -// IdempotencyKeyNotIn applies the NotIn predicate on the "idempotency_key" field. -func IdempotencyKeyNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldIdempotencyKey, vs...)) -} - -// IdempotencyKeyGT applies the GT predicate on the "idempotency_key" field. -func IdempotencyKeyGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldIdempotencyKey, v)) -} - -// IdempotencyKeyGTE applies the GTE predicate on the "idempotency_key" field. -func IdempotencyKeyGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldIdempotencyKey, v)) -} - -// IdempotencyKeyLT applies the LT predicate on the "idempotency_key" field. -func IdempotencyKeyLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldIdempotencyKey, v)) -} - -// IdempotencyKeyLTE applies the LTE predicate on the "idempotency_key" field. -func IdempotencyKeyLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldIdempotencyKey, v)) -} - -// IdempotencyKeyContains applies the Contains predicate on the "idempotency_key" field. 
-func IdempotencyKeyContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldIdempotencyKey, v)) -} - -// IdempotencyKeyHasPrefix applies the HasPrefix predicate on the "idempotency_key" field. -func IdempotencyKeyHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldIdempotencyKey, v)) -} - -// IdempotencyKeyHasSuffix applies the HasSuffix predicate on the "idempotency_key" field. -func IdempotencyKeyHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldIdempotencyKey, v)) -} - -// IdempotencyKeyIsNil applies the IsNil predicate on the "idempotency_key" field. -func IdempotencyKeyIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldIdempotencyKey)) -} - -// IdempotencyKeyNotNil applies the NotNil predicate on the "idempotency_key" field. -func IdempotencyKeyNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldIdempotencyKey)) -} - -// IdempotencyKeyEqualFold applies the EqualFold predicate on the "idempotency_key" field. -func IdempotencyKeyEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldIdempotencyKey, v)) -} - -// IdempotencyKeyContainsFold applies the ContainsFold predicate on the "idempotency_key" field. -func IdempotencyKeyContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldIdempotencyKey, v)) -} - -// UploadToS3EQ applies the EQ predicate on the "upload_to_s3" field. -func UploadToS3EQ(v bool) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldUploadToS3, v)) -} - -// UploadToS3NEQ applies the NEQ predicate on the "upload_to_s3" field. -func UploadToS3NEQ(v bool) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldUploadToS3, v)) -} - -// S3ProfileIDEQ applies the EQ predicate on the "s3_profile_id" field. 
-func S3ProfileIDEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldS3ProfileID, v)) -} - -// S3ProfileIDNEQ applies the NEQ predicate on the "s3_profile_id" field. -func S3ProfileIDNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldS3ProfileID, v)) -} - -// S3ProfileIDIn applies the In predicate on the "s3_profile_id" field. -func S3ProfileIDIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldS3ProfileID, vs...)) -} - -// S3ProfileIDNotIn applies the NotIn predicate on the "s3_profile_id" field. -func S3ProfileIDNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldS3ProfileID, vs...)) -} - -// S3ProfileIDGT applies the GT predicate on the "s3_profile_id" field. -func S3ProfileIDGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldS3ProfileID, v)) -} - -// S3ProfileIDGTE applies the GTE predicate on the "s3_profile_id" field. -func S3ProfileIDGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldS3ProfileID, v)) -} - -// S3ProfileIDLT applies the LT predicate on the "s3_profile_id" field. -func S3ProfileIDLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldS3ProfileID, v)) -} - -// S3ProfileIDLTE applies the LTE predicate on the "s3_profile_id" field. -func S3ProfileIDLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldS3ProfileID, v)) -} - -// S3ProfileIDContains applies the Contains predicate on the "s3_profile_id" field. -func S3ProfileIDContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldS3ProfileID, v)) -} - -// S3ProfileIDHasPrefix applies the HasPrefix predicate on the "s3_profile_id" field. 
-func S3ProfileIDHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldS3ProfileID, v)) -} - -// S3ProfileIDHasSuffix applies the HasSuffix predicate on the "s3_profile_id" field. -func S3ProfileIDHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldS3ProfileID, v)) -} - -// S3ProfileIDIsNil applies the IsNil predicate on the "s3_profile_id" field. -func S3ProfileIDIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldS3ProfileID)) -} - -// S3ProfileIDNotNil applies the NotNil predicate on the "s3_profile_id" field. -func S3ProfileIDNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldS3ProfileID)) -} - -// S3ProfileIDEqualFold applies the EqualFold predicate on the "s3_profile_id" field. -func S3ProfileIDEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldS3ProfileID, v)) -} - -// S3ProfileIDContainsFold applies the ContainsFold predicate on the "s3_profile_id" field. -func S3ProfileIDContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldS3ProfileID, v)) -} - -// PostgresProfileIDEQ applies the EQ predicate on the "postgres_profile_id" field. -func PostgresProfileIDEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldPostgresProfileID, v)) -} - -// PostgresProfileIDNEQ applies the NEQ predicate on the "postgres_profile_id" field. -func PostgresProfileIDNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldPostgresProfileID, v)) -} - -// PostgresProfileIDIn applies the In predicate on the "postgres_profile_id" field. -func PostgresProfileIDIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldPostgresProfileID, vs...)) -} - -// PostgresProfileIDNotIn applies the NotIn predicate on the "postgres_profile_id" field. 
-func PostgresProfileIDNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldPostgresProfileID, vs...)) -} - -// PostgresProfileIDGT applies the GT predicate on the "postgres_profile_id" field. -func PostgresProfileIDGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldPostgresProfileID, v)) -} - -// PostgresProfileIDGTE applies the GTE predicate on the "postgres_profile_id" field. -func PostgresProfileIDGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldPostgresProfileID, v)) -} - -// PostgresProfileIDLT applies the LT predicate on the "postgres_profile_id" field. -func PostgresProfileIDLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldPostgresProfileID, v)) -} - -// PostgresProfileIDLTE applies the LTE predicate on the "postgres_profile_id" field. -func PostgresProfileIDLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldPostgresProfileID, v)) -} - -// PostgresProfileIDContains applies the Contains predicate on the "postgres_profile_id" field. -func PostgresProfileIDContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldPostgresProfileID, v)) -} - -// PostgresProfileIDHasPrefix applies the HasPrefix predicate on the "postgres_profile_id" field. -func PostgresProfileIDHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldPostgresProfileID, v)) -} - -// PostgresProfileIDHasSuffix applies the HasSuffix predicate on the "postgres_profile_id" field. -func PostgresProfileIDHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldPostgresProfileID, v)) -} - -// PostgresProfileIDIsNil applies the IsNil predicate on the "postgres_profile_id" field. 
-func PostgresProfileIDIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldPostgresProfileID)) -} - -// PostgresProfileIDNotNil applies the NotNil predicate on the "postgres_profile_id" field. -func PostgresProfileIDNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldPostgresProfileID)) -} - -// PostgresProfileIDEqualFold applies the EqualFold predicate on the "postgres_profile_id" field. -func PostgresProfileIDEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldPostgresProfileID, v)) -} - -// PostgresProfileIDContainsFold applies the ContainsFold predicate on the "postgres_profile_id" field. -func PostgresProfileIDContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldPostgresProfileID, v)) -} - -// RedisProfileIDEQ applies the EQ predicate on the "redis_profile_id" field. -func RedisProfileIDEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldRedisProfileID, v)) -} - -// RedisProfileIDNEQ applies the NEQ predicate on the "redis_profile_id" field. -func RedisProfileIDNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldRedisProfileID, v)) -} - -// RedisProfileIDIn applies the In predicate on the "redis_profile_id" field. -func RedisProfileIDIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldRedisProfileID, vs...)) -} - -// RedisProfileIDNotIn applies the NotIn predicate on the "redis_profile_id" field. -func RedisProfileIDNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldRedisProfileID, vs...)) -} - -// RedisProfileIDGT applies the GT predicate on the "redis_profile_id" field. -func RedisProfileIDGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldRedisProfileID, v)) -} - -// RedisProfileIDGTE applies the GTE predicate on the "redis_profile_id" field. 
-func RedisProfileIDGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldRedisProfileID, v)) -} - -// RedisProfileIDLT applies the LT predicate on the "redis_profile_id" field. -func RedisProfileIDLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldRedisProfileID, v)) -} - -// RedisProfileIDLTE applies the LTE predicate on the "redis_profile_id" field. -func RedisProfileIDLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldRedisProfileID, v)) -} - -// RedisProfileIDContains applies the Contains predicate on the "redis_profile_id" field. -func RedisProfileIDContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldRedisProfileID, v)) -} - -// RedisProfileIDHasPrefix applies the HasPrefix predicate on the "redis_profile_id" field. -func RedisProfileIDHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldRedisProfileID, v)) -} - -// RedisProfileIDHasSuffix applies the HasSuffix predicate on the "redis_profile_id" field. -func RedisProfileIDHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldRedisProfileID, v)) -} - -// RedisProfileIDIsNil applies the IsNil predicate on the "redis_profile_id" field. -func RedisProfileIDIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldRedisProfileID)) -} - -// RedisProfileIDNotNil applies the NotNil predicate on the "redis_profile_id" field. -func RedisProfileIDNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldRedisProfileID)) -} - -// RedisProfileIDEqualFold applies the EqualFold predicate on the "redis_profile_id" field. -func RedisProfileIDEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldRedisProfileID, v)) -} - -// RedisProfileIDContainsFold applies the ContainsFold predicate on the "redis_profile_id" field. 
-func RedisProfileIDContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldRedisProfileID, v)) -} - -// StartedAtEQ applies the EQ predicate on the "started_at" field. -func StartedAtEQ(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldStartedAt, v)) -} - -// StartedAtNEQ applies the NEQ predicate on the "started_at" field. -func StartedAtNEQ(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldStartedAt, v)) -} - -// StartedAtIn applies the In predicate on the "started_at" field. -func StartedAtIn(vs ...time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldStartedAt, vs...)) -} - -// StartedAtNotIn applies the NotIn predicate on the "started_at" field. -func StartedAtNotIn(vs ...time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldStartedAt, vs...)) -} - -// StartedAtGT applies the GT predicate on the "started_at" field. -func StartedAtGT(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldStartedAt, v)) -} - -// StartedAtGTE applies the GTE predicate on the "started_at" field. -func StartedAtGTE(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldStartedAt, v)) -} - -// StartedAtLT applies the LT predicate on the "started_at" field. -func StartedAtLT(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldStartedAt, v)) -} - -// StartedAtLTE applies the LTE predicate on the "started_at" field. -func StartedAtLTE(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldStartedAt, v)) -} - -// StartedAtIsNil applies the IsNil predicate on the "started_at" field. -func StartedAtIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldStartedAt)) -} - -// StartedAtNotNil applies the NotNil predicate on the "started_at" field. 
-func StartedAtNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldStartedAt)) -} - -// FinishedAtEQ applies the EQ predicate on the "finished_at" field. -func FinishedAtEQ(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldFinishedAt, v)) -} - -// FinishedAtNEQ applies the NEQ predicate on the "finished_at" field. -func FinishedAtNEQ(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldFinishedAt, v)) -} - -// FinishedAtIn applies the In predicate on the "finished_at" field. -func FinishedAtIn(vs ...time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldFinishedAt, vs...)) -} - -// FinishedAtNotIn applies the NotIn predicate on the "finished_at" field. -func FinishedAtNotIn(vs ...time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldFinishedAt, vs...)) -} - -// FinishedAtGT applies the GT predicate on the "finished_at" field. -func FinishedAtGT(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldFinishedAt, v)) -} - -// FinishedAtGTE applies the GTE predicate on the "finished_at" field. -func FinishedAtGTE(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldFinishedAt, v)) -} - -// FinishedAtLT applies the LT predicate on the "finished_at" field. -func FinishedAtLT(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldFinishedAt, v)) -} - -// FinishedAtLTE applies the LTE predicate on the "finished_at" field. -func FinishedAtLTE(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldFinishedAt, v)) -} - -// FinishedAtIsNil applies the IsNil predicate on the "finished_at" field. -func FinishedAtIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldFinishedAt)) -} - -// FinishedAtNotNil applies the NotNil predicate on the "finished_at" field. 
-func FinishedAtNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldFinishedAt)) -} - -// ErrorMessageEQ applies the EQ predicate on the "error_message" field. -func ErrorMessageEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldErrorMessage, v)) -} - -// ErrorMessageNEQ applies the NEQ predicate on the "error_message" field. -func ErrorMessageNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldErrorMessage, v)) -} - -// ErrorMessageIn applies the In predicate on the "error_message" field. -func ErrorMessageIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldErrorMessage, vs...)) -} - -// ErrorMessageNotIn applies the NotIn predicate on the "error_message" field. -func ErrorMessageNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldErrorMessage, vs...)) -} - -// ErrorMessageGT applies the GT predicate on the "error_message" field. -func ErrorMessageGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldErrorMessage, v)) -} - -// ErrorMessageGTE applies the GTE predicate on the "error_message" field. -func ErrorMessageGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldErrorMessage, v)) -} - -// ErrorMessageLT applies the LT predicate on the "error_message" field. -func ErrorMessageLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldErrorMessage, v)) -} - -// ErrorMessageLTE applies the LTE predicate on the "error_message" field. -func ErrorMessageLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldErrorMessage, v)) -} - -// ErrorMessageContains applies the Contains predicate on the "error_message" field. 
-func ErrorMessageContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldErrorMessage, v)) -} - -// ErrorMessageHasPrefix applies the HasPrefix predicate on the "error_message" field. -func ErrorMessageHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldErrorMessage, v)) -} - -// ErrorMessageHasSuffix applies the HasSuffix predicate on the "error_message" field. -func ErrorMessageHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldErrorMessage, v)) -} - -// ErrorMessageIsNil applies the IsNil predicate on the "error_message" field. -func ErrorMessageIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldErrorMessage)) -} - -// ErrorMessageNotNil applies the NotNil predicate on the "error_message" field. -func ErrorMessageNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldErrorMessage)) -} - -// ErrorMessageEqualFold applies the EqualFold predicate on the "error_message" field. -func ErrorMessageEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldErrorMessage, v)) -} - -// ErrorMessageContainsFold applies the ContainsFold predicate on the "error_message" field. -func ErrorMessageContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldErrorMessage, v)) -} - -// ArtifactLocalPathEQ applies the EQ predicate on the "artifact_local_path" field. -func ArtifactLocalPathEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldArtifactLocalPath, v)) -} - -// ArtifactLocalPathNEQ applies the NEQ predicate on the "artifact_local_path" field. -func ArtifactLocalPathNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldArtifactLocalPath, v)) -} - -// ArtifactLocalPathIn applies the In predicate on the "artifact_local_path" field. 
-func ArtifactLocalPathIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldArtifactLocalPath, vs...)) -} - -// ArtifactLocalPathNotIn applies the NotIn predicate on the "artifact_local_path" field. -func ArtifactLocalPathNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldArtifactLocalPath, vs...)) -} - -// ArtifactLocalPathGT applies the GT predicate on the "artifact_local_path" field. -func ArtifactLocalPathGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldArtifactLocalPath, v)) -} - -// ArtifactLocalPathGTE applies the GTE predicate on the "artifact_local_path" field. -func ArtifactLocalPathGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldArtifactLocalPath, v)) -} - -// ArtifactLocalPathLT applies the LT predicate on the "artifact_local_path" field. -func ArtifactLocalPathLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldArtifactLocalPath, v)) -} - -// ArtifactLocalPathLTE applies the LTE predicate on the "artifact_local_path" field. -func ArtifactLocalPathLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldArtifactLocalPath, v)) -} - -// ArtifactLocalPathContains applies the Contains predicate on the "artifact_local_path" field. -func ArtifactLocalPathContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldArtifactLocalPath, v)) -} - -// ArtifactLocalPathHasPrefix applies the HasPrefix predicate on the "artifact_local_path" field. -func ArtifactLocalPathHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldArtifactLocalPath, v)) -} - -// ArtifactLocalPathHasSuffix applies the HasSuffix predicate on the "artifact_local_path" field. 
-func ArtifactLocalPathHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldArtifactLocalPath, v)) -} - -// ArtifactLocalPathIsNil applies the IsNil predicate on the "artifact_local_path" field. -func ArtifactLocalPathIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldArtifactLocalPath)) -} - -// ArtifactLocalPathNotNil applies the NotNil predicate on the "artifact_local_path" field. -func ArtifactLocalPathNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldArtifactLocalPath)) -} - -// ArtifactLocalPathEqualFold applies the EqualFold predicate on the "artifact_local_path" field. -func ArtifactLocalPathEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldArtifactLocalPath, v)) -} - -// ArtifactLocalPathContainsFold applies the ContainsFold predicate on the "artifact_local_path" field. -func ArtifactLocalPathContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldArtifactLocalPath, v)) -} - -// ArtifactSizeBytesEQ applies the EQ predicate on the "artifact_size_bytes" field. -func ArtifactSizeBytesEQ(v int64) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldArtifactSizeBytes, v)) -} - -// ArtifactSizeBytesNEQ applies the NEQ predicate on the "artifact_size_bytes" field. -func ArtifactSizeBytesNEQ(v int64) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldArtifactSizeBytes, v)) -} - -// ArtifactSizeBytesIn applies the In predicate on the "artifact_size_bytes" field. -func ArtifactSizeBytesIn(vs ...int64) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldArtifactSizeBytes, vs...)) -} - -// ArtifactSizeBytesNotIn applies the NotIn predicate on the "artifact_size_bytes" field. 
-func ArtifactSizeBytesNotIn(vs ...int64) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldArtifactSizeBytes, vs...)) -} - -// ArtifactSizeBytesGT applies the GT predicate on the "artifact_size_bytes" field. -func ArtifactSizeBytesGT(v int64) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldArtifactSizeBytes, v)) -} - -// ArtifactSizeBytesGTE applies the GTE predicate on the "artifact_size_bytes" field. -func ArtifactSizeBytesGTE(v int64) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldArtifactSizeBytes, v)) -} - -// ArtifactSizeBytesLT applies the LT predicate on the "artifact_size_bytes" field. -func ArtifactSizeBytesLT(v int64) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldArtifactSizeBytes, v)) -} - -// ArtifactSizeBytesLTE applies the LTE predicate on the "artifact_size_bytes" field. -func ArtifactSizeBytesLTE(v int64) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldArtifactSizeBytes, v)) -} - -// ArtifactSizeBytesIsNil applies the IsNil predicate on the "artifact_size_bytes" field. -func ArtifactSizeBytesIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldArtifactSizeBytes)) -} - -// ArtifactSizeBytesNotNil applies the NotNil predicate on the "artifact_size_bytes" field. -func ArtifactSizeBytesNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldArtifactSizeBytes)) -} - -// ArtifactSha256EQ applies the EQ predicate on the "artifact_sha256" field. -func ArtifactSha256EQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldArtifactSha256, v)) -} - -// ArtifactSha256NEQ applies the NEQ predicate on the "artifact_sha256" field. -func ArtifactSha256NEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldArtifactSha256, v)) -} - -// ArtifactSha256In applies the In predicate on the "artifact_sha256" field. 
-func ArtifactSha256In(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldArtifactSha256, vs...)) -} - -// ArtifactSha256NotIn applies the NotIn predicate on the "artifact_sha256" field. -func ArtifactSha256NotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldArtifactSha256, vs...)) -} - -// ArtifactSha256GT applies the GT predicate on the "artifact_sha256" field. -func ArtifactSha256GT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldArtifactSha256, v)) -} - -// ArtifactSha256GTE applies the GTE predicate on the "artifact_sha256" field. -func ArtifactSha256GTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldArtifactSha256, v)) -} - -// ArtifactSha256LT applies the LT predicate on the "artifact_sha256" field. -func ArtifactSha256LT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldArtifactSha256, v)) -} - -// ArtifactSha256LTE applies the LTE predicate on the "artifact_sha256" field. -func ArtifactSha256LTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldArtifactSha256, v)) -} - -// ArtifactSha256Contains applies the Contains predicate on the "artifact_sha256" field. -func ArtifactSha256Contains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldArtifactSha256, v)) -} - -// ArtifactSha256HasPrefix applies the HasPrefix predicate on the "artifact_sha256" field. -func ArtifactSha256HasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldArtifactSha256, v)) -} - -// ArtifactSha256HasSuffix applies the HasSuffix predicate on the "artifact_sha256" field. -func ArtifactSha256HasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldArtifactSha256, v)) -} - -// ArtifactSha256IsNil applies the IsNil predicate on the "artifact_sha256" field. 
-func ArtifactSha256IsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldArtifactSha256)) -} - -// ArtifactSha256NotNil applies the NotNil predicate on the "artifact_sha256" field. -func ArtifactSha256NotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldArtifactSha256)) -} - -// ArtifactSha256EqualFold applies the EqualFold predicate on the "artifact_sha256" field. -func ArtifactSha256EqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldArtifactSha256, v)) -} - -// ArtifactSha256ContainsFold applies the ContainsFold predicate on the "artifact_sha256" field. -func ArtifactSha256ContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldArtifactSha256, v)) -} - -// S3BucketEQ applies the EQ predicate on the "s3_bucket" field. -func S3BucketEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldS3Bucket, v)) -} - -// S3BucketNEQ applies the NEQ predicate on the "s3_bucket" field. -func S3BucketNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldS3Bucket, v)) -} - -// S3BucketIn applies the In predicate on the "s3_bucket" field. -func S3BucketIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldS3Bucket, vs...)) -} - -// S3BucketNotIn applies the NotIn predicate on the "s3_bucket" field. -func S3BucketNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldS3Bucket, vs...)) -} - -// S3BucketGT applies the GT predicate on the "s3_bucket" field. -func S3BucketGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldS3Bucket, v)) -} - -// S3BucketGTE applies the GTE predicate on the "s3_bucket" field. -func S3BucketGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldS3Bucket, v)) -} - -// S3BucketLT applies the LT predicate on the "s3_bucket" field. 
-func S3BucketLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldS3Bucket, v)) -} - -// S3BucketLTE applies the LTE predicate on the "s3_bucket" field. -func S3BucketLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldS3Bucket, v)) -} - -// S3BucketContains applies the Contains predicate on the "s3_bucket" field. -func S3BucketContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldS3Bucket, v)) -} - -// S3BucketHasPrefix applies the HasPrefix predicate on the "s3_bucket" field. -func S3BucketHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldS3Bucket, v)) -} - -// S3BucketHasSuffix applies the HasSuffix predicate on the "s3_bucket" field. -func S3BucketHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldS3Bucket, v)) -} - -// S3BucketIsNil applies the IsNil predicate on the "s3_bucket" field. -func S3BucketIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldS3Bucket)) -} - -// S3BucketNotNil applies the NotNil predicate on the "s3_bucket" field. -func S3BucketNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldS3Bucket)) -} - -// S3BucketEqualFold applies the EqualFold predicate on the "s3_bucket" field. -func S3BucketEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldS3Bucket, v)) -} - -// S3BucketContainsFold applies the ContainsFold predicate on the "s3_bucket" field. -func S3BucketContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldS3Bucket, v)) -} - -// S3KeyEQ applies the EQ predicate on the "s3_key" field. -func S3KeyEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldS3Key, v)) -} - -// S3KeyNEQ applies the NEQ predicate on the "s3_key" field. 
-func S3KeyNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldS3Key, v)) -} - -// S3KeyIn applies the In predicate on the "s3_key" field. -func S3KeyIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldS3Key, vs...)) -} - -// S3KeyNotIn applies the NotIn predicate on the "s3_key" field. -func S3KeyNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldS3Key, vs...)) -} - -// S3KeyGT applies the GT predicate on the "s3_key" field. -func S3KeyGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldS3Key, v)) -} - -// S3KeyGTE applies the GTE predicate on the "s3_key" field. -func S3KeyGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldS3Key, v)) -} - -// S3KeyLT applies the LT predicate on the "s3_key" field. -func S3KeyLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldS3Key, v)) -} - -// S3KeyLTE applies the LTE predicate on the "s3_key" field. -func S3KeyLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldS3Key, v)) -} - -// S3KeyContains applies the Contains predicate on the "s3_key" field. -func S3KeyContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldS3Key, v)) -} - -// S3KeyHasPrefix applies the HasPrefix predicate on the "s3_key" field. -func S3KeyHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldS3Key, v)) -} - -// S3KeyHasSuffix applies the HasSuffix predicate on the "s3_key" field. -func S3KeyHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldS3Key, v)) -} - -// S3KeyIsNil applies the IsNil predicate on the "s3_key" field. -func S3KeyIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldS3Key)) -} - -// S3KeyNotNil applies the NotNil predicate on the "s3_key" field. 
-func S3KeyNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldS3Key)) -} - -// S3KeyEqualFold applies the EqualFold predicate on the "s3_key" field. -func S3KeyEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldS3Key, v)) -} - -// S3KeyContainsFold applies the ContainsFold predicate on the "s3_key" field. -func S3KeyContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldS3Key, v)) -} - -// S3EtagEQ applies the EQ predicate on the "s3_etag" field. -func S3EtagEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldS3Etag, v)) -} - -// S3EtagNEQ applies the NEQ predicate on the "s3_etag" field. -func S3EtagNEQ(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldS3Etag, v)) -} - -// S3EtagIn applies the In predicate on the "s3_etag" field. -func S3EtagIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldS3Etag, vs...)) -} - -// S3EtagNotIn applies the NotIn predicate on the "s3_etag" field. -func S3EtagNotIn(vs ...string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldS3Etag, vs...)) -} - -// S3EtagGT applies the GT predicate on the "s3_etag" field. -func S3EtagGT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldS3Etag, v)) -} - -// S3EtagGTE applies the GTE predicate on the "s3_etag" field. -func S3EtagGTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldS3Etag, v)) -} - -// S3EtagLT applies the LT predicate on the "s3_etag" field. -func S3EtagLT(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldS3Etag, v)) -} - -// S3EtagLTE applies the LTE predicate on the "s3_etag" field. -func S3EtagLTE(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldS3Etag, v)) -} - -// S3EtagContains applies the Contains predicate on the "s3_etag" field. 
-func S3EtagContains(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContains(FieldS3Etag, v)) -} - -// S3EtagHasPrefix applies the HasPrefix predicate on the "s3_etag" field. -func S3EtagHasPrefix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasPrefix(FieldS3Etag, v)) -} - -// S3EtagHasSuffix applies the HasSuffix predicate on the "s3_etag" field. -func S3EtagHasSuffix(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldHasSuffix(FieldS3Etag, v)) -} - -// S3EtagIsNil applies the IsNil predicate on the "s3_etag" field. -func S3EtagIsNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldIsNull(FieldS3Etag)) -} - -// S3EtagNotNil applies the NotNil predicate on the "s3_etag" field. -func S3EtagNotNil() predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotNull(FieldS3Etag)) -} - -// S3EtagEqualFold applies the EqualFold predicate on the "s3_etag" field. -func S3EtagEqualFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEqualFold(FieldS3Etag, v)) -} - -// S3EtagContainsFold applies the ContainsFold predicate on the "s3_etag" field. -func S3EtagContainsFold(v string) predicate.BackupJob { - return predicate.BackupJob(sql.FieldContainsFold(FieldS3Etag, v)) -} - -// CreatedAtEQ applies the EQ predicate on the "created_at" field. -func CreatedAtEQ(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldCreatedAt, v)) -} - -// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. -func CreatedAtNEQ(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldCreatedAt, v)) -} - -// CreatedAtIn applies the In predicate on the "created_at" field. -func CreatedAtIn(vs ...time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldCreatedAt, vs...)) -} - -// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
-func CreatedAtNotIn(vs ...time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldCreatedAt, vs...)) -} - -// CreatedAtGT applies the GT predicate on the "created_at" field. -func CreatedAtGT(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldCreatedAt, v)) -} - -// CreatedAtGTE applies the GTE predicate on the "created_at" field. -func CreatedAtGTE(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldCreatedAt, v)) -} - -// CreatedAtLT applies the LT predicate on the "created_at" field. -func CreatedAtLT(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldCreatedAt, v)) -} - -// CreatedAtLTE applies the LTE predicate on the "created_at" field. -func CreatedAtLTE(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldCreatedAt, v)) -} - -// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. -func UpdatedAtEQ(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldEQ(FieldUpdatedAt, v)) -} - -// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. -func UpdatedAtNEQ(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNEQ(FieldUpdatedAt, v)) -} - -// UpdatedAtIn applies the In predicate on the "updated_at" field. -func UpdatedAtIn(vs ...time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldIn(FieldUpdatedAt, vs...)) -} - -// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. -func UpdatedAtNotIn(vs ...time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldNotIn(FieldUpdatedAt, vs...)) -} - -// UpdatedAtGT applies the GT predicate on the "updated_at" field. -func UpdatedAtGT(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGT(FieldUpdatedAt, v)) -} - -// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. 
-func UpdatedAtGTE(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldGTE(FieldUpdatedAt, v)) -} - -// UpdatedAtLT applies the LT predicate on the "updated_at" field. -func UpdatedAtLT(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLT(FieldUpdatedAt, v)) -} - -// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. -func UpdatedAtLTE(v time.Time) predicate.BackupJob { - return predicate.BackupJob(sql.FieldLTE(FieldUpdatedAt, v)) -} - -// HasEvents applies the HasEdge predicate on the "events" edge. -func HasEvents() predicate.BackupJob { - return predicate.BackupJob(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, EventsTable, EventsColumn), - ) - sqlgraph.HasNeighbors(s, step) - }) -} - -// HasEventsWith applies the HasEdge predicate on the "events" edge with a given conditions (other predicates). -func HasEventsWith(preds ...predicate.BackupJobEvent) predicate.BackupJob { - return predicate.BackupJob(func(s *sql.Selector) { - step := newEventsStep() - sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { - for _, p := range preds { - p(s) - } - }) - }) -} - -// And groups predicates with the AND operator between them. -func And(predicates ...predicate.BackupJob) predicate.BackupJob { - return predicate.BackupJob(sql.AndPredicates(predicates...)) -} - -// Or groups predicates with the OR operator between them. -func Or(predicates ...predicate.BackupJob) predicate.BackupJob { - return predicate.BackupJob(sql.OrPredicates(predicates...)) -} - -// Not applies the not operator on the given predicate. 
-func Not(p predicate.BackupJob) predicate.BackupJob { - return predicate.BackupJob(sql.NotPredicates(p)) -} diff --git a/backup/ent/backupjob_create.go b/backup/ent/backupjob_create.go deleted file mode 100644 index 8bc60bba8..000000000 --- a/backup/ent/backupjob_create.go +++ /dev/null @@ -1,604 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" -) - -// BackupJobCreate is the builder for creating a BackupJob entity. -type BackupJobCreate struct { - config - mutation *BackupJobMutation - hooks []Hook -} - -// SetJobID sets the "job_id" field. -func (_c *BackupJobCreate) SetJobID(v string) *BackupJobCreate { - _c.mutation.SetJobID(v) - return _c -} - -// SetBackupType sets the "backup_type" field. -func (_c *BackupJobCreate) SetBackupType(v backupjob.BackupType) *BackupJobCreate { - _c.mutation.SetBackupType(v) - return _c -} - -// SetStatus sets the "status" field. -func (_c *BackupJobCreate) SetStatus(v backupjob.Status) *BackupJobCreate { - _c.mutation.SetStatus(v) - return _c -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableStatus(v *backupjob.Status) *BackupJobCreate { - if v != nil { - _c.SetStatus(*v) - } - return _c -} - -// SetTriggeredBy sets the "triggered_by" field. -func (_c *BackupJobCreate) SetTriggeredBy(v string) *BackupJobCreate { - _c.mutation.SetTriggeredBy(v) - return _c -} - -// SetNillableTriggeredBy sets the "triggered_by" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableTriggeredBy(v *string) *BackupJobCreate { - if v != nil { - _c.SetTriggeredBy(*v) - } - return _c -} - -// SetIdempotencyKey sets the "idempotency_key" field. 
-func (_c *BackupJobCreate) SetIdempotencyKey(v string) *BackupJobCreate { - _c.mutation.SetIdempotencyKey(v) - return _c -} - -// SetNillableIdempotencyKey sets the "idempotency_key" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableIdempotencyKey(v *string) *BackupJobCreate { - if v != nil { - _c.SetIdempotencyKey(*v) - } - return _c -} - -// SetUploadToS3 sets the "upload_to_s3" field. -func (_c *BackupJobCreate) SetUploadToS3(v bool) *BackupJobCreate { - _c.mutation.SetUploadToS3(v) - return _c -} - -// SetNillableUploadToS3 sets the "upload_to_s3" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableUploadToS3(v *bool) *BackupJobCreate { - if v != nil { - _c.SetUploadToS3(*v) - } - return _c -} - -// SetS3ProfileID sets the "s3_profile_id" field. -func (_c *BackupJobCreate) SetS3ProfileID(v string) *BackupJobCreate { - _c.mutation.SetS3ProfileID(v) - return _c -} - -// SetNillableS3ProfileID sets the "s3_profile_id" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableS3ProfileID(v *string) *BackupJobCreate { - if v != nil { - _c.SetS3ProfileID(*v) - } - return _c -} - -// SetPostgresProfileID sets the "postgres_profile_id" field. -func (_c *BackupJobCreate) SetPostgresProfileID(v string) *BackupJobCreate { - _c.mutation.SetPostgresProfileID(v) - return _c -} - -// SetNillablePostgresProfileID sets the "postgres_profile_id" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillablePostgresProfileID(v *string) *BackupJobCreate { - if v != nil { - _c.SetPostgresProfileID(*v) - } - return _c -} - -// SetRedisProfileID sets the "redis_profile_id" field. -func (_c *BackupJobCreate) SetRedisProfileID(v string) *BackupJobCreate { - _c.mutation.SetRedisProfileID(v) - return _c -} - -// SetNillableRedisProfileID sets the "redis_profile_id" field if the given value is not nil. 
-func (_c *BackupJobCreate) SetNillableRedisProfileID(v *string) *BackupJobCreate { - if v != nil { - _c.SetRedisProfileID(*v) - } - return _c -} - -// SetStartedAt sets the "started_at" field. -func (_c *BackupJobCreate) SetStartedAt(v time.Time) *BackupJobCreate { - _c.mutation.SetStartedAt(v) - return _c -} - -// SetNillableStartedAt sets the "started_at" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableStartedAt(v *time.Time) *BackupJobCreate { - if v != nil { - _c.SetStartedAt(*v) - } - return _c -} - -// SetFinishedAt sets the "finished_at" field. -func (_c *BackupJobCreate) SetFinishedAt(v time.Time) *BackupJobCreate { - _c.mutation.SetFinishedAt(v) - return _c -} - -// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableFinishedAt(v *time.Time) *BackupJobCreate { - if v != nil { - _c.SetFinishedAt(*v) - } - return _c -} - -// SetErrorMessage sets the "error_message" field. -func (_c *BackupJobCreate) SetErrorMessage(v string) *BackupJobCreate { - _c.mutation.SetErrorMessage(v) - return _c -} - -// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableErrorMessage(v *string) *BackupJobCreate { - if v != nil { - _c.SetErrorMessage(*v) - } - return _c -} - -// SetArtifactLocalPath sets the "artifact_local_path" field. -func (_c *BackupJobCreate) SetArtifactLocalPath(v string) *BackupJobCreate { - _c.mutation.SetArtifactLocalPath(v) - return _c -} - -// SetNillableArtifactLocalPath sets the "artifact_local_path" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableArtifactLocalPath(v *string) *BackupJobCreate { - if v != nil { - _c.SetArtifactLocalPath(*v) - } - return _c -} - -// SetArtifactSizeBytes sets the "artifact_size_bytes" field. 
-func (_c *BackupJobCreate) SetArtifactSizeBytes(v int64) *BackupJobCreate { - _c.mutation.SetArtifactSizeBytes(v) - return _c -} - -// SetNillableArtifactSizeBytes sets the "artifact_size_bytes" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableArtifactSizeBytes(v *int64) *BackupJobCreate { - if v != nil { - _c.SetArtifactSizeBytes(*v) - } - return _c -} - -// SetArtifactSha256 sets the "artifact_sha256" field. -func (_c *BackupJobCreate) SetArtifactSha256(v string) *BackupJobCreate { - _c.mutation.SetArtifactSha256(v) - return _c -} - -// SetNillableArtifactSha256 sets the "artifact_sha256" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableArtifactSha256(v *string) *BackupJobCreate { - if v != nil { - _c.SetArtifactSha256(*v) - } - return _c -} - -// SetS3Bucket sets the "s3_bucket" field. -func (_c *BackupJobCreate) SetS3Bucket(v string) *BackupJobCreate { - _c.mutation.SetS3Bucket(v) - return _c -} - -// SetNillableS3Bucket sets the "s3_bucket" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableS3Bucket(v *string) *BackupJobCreate { - if v != nil { - _c.SetS3Bucket(*v) - } - return _c -} - -// SetS3Key sets the "s3_key" field. -func (_c *BackupJobCreate) SetS3Key(v string) *BackupJobCreate { - _c.mutation.SetS3Key(v) - return _c -} - -// SetNillableS3Key sets the "s3_key" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableS3Key(v *string) *BackupJobCreate { - if v != nil { - _c.SetS3Key(*v) - } - return _c -} - -// SetS3Etag sets the "s3_etag" field. -func (_c *BackupJobCreate) SetS3Etag(v string) *BackupJobCreate { - _c.mutation.SetS3Etag(v) - return _c -} - -// SetNillableS3Etag sets the "s3_etag" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableS3Etag(v *string) *BackupJobCreate { - if v != nil { - _c.SetS3Etag(*v) - } - return _c -} - -// SetCreatedAt sets the "created_at" field. 
-func (_c *BackupJobCreate) SetCreatedAt(v time.Time) *BackupJobCreate { - _c.mutation.SetCreatedAt(v) - return _c -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableCreatedAt(v *time.Time) *BackupJobCreate { - if v != nil { - _c.SetCreatedAt(*v) - } - return _c -} - -// SetUpdatedAt sets the "updated_at" field. -func (_c *BackupJobCreate) SetUpdatedAt(v time.Time) *BackupJobCreate { - _c.mutation.SetUpdatedAt(v) - return _c -} - -// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (_c *BackupJobCreate) SetNillableUpdatedAt(v *time.Time) *BackupJobCreate { - if v != nil { - _c.SetUpdatedAt(*v) - } - return _c -} - -// AddEventIDs adds the "events" edge to the BackupJobEvent entity by IDs. -func (_c *BackupJobCreate) AddEventIDs(ids ...int) *BackupJobCreate { - _c.mutation.AddEventIDs(ids...) - return _c -} - -// AddEvents adds the "events" edges to the BackupJobEvent entity. -func (_c *BackupJobCreate) AddEvents(v ...*BackupJobEvent) *BackupJobCreate { - ids := make([]int, len(v)) - for i := range v { - ids[i] = v[i].ID - } - return _c.AddEventIDs(ids...) -} - -// Mutation returns the BackupJobMutation object of the builder. -func (_c *BackupJobCreate) Mutation() *BackupJobMutation { - return _c.mutation -} - -// Save creates the BackupJob in the database. -func (_c *BackupJobCreate) Save(ctx context.Context) (*BackupJob, error) { - _c.defaults() - return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) -} - -// SaveX calls Save and panics if Save returns an error. -func (_c *BackupJobCreate) SaveX(ctx context.Context) *BackupJob { - v, err := _c.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. -func (_c *BackupJobCreate) Exec(ctx context.Context) error { - _, err := _c.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. 
-func (_c *BackupJobCreate) ExecX(ctx context.Context) { - if err := _c.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_c *BackupJobCreate) defaults() { - if _, ok := _c.mutation.Status(); !ok { - v := backupjob.DefaultStatus - _c.mutation.SetStatus(v) - } - if _, ok := _c.mutation.TriggeredBy(); !ok { - v := backupjob.DefaultTriggeredBy - _c.mutation.SetTriggeredBy(v) - } - if _, ok := _c.mutation.UploadToS3(); !ok { - v := backupjob.DefaultUploadToS3 - _c.mutation.SetUploadToS3(v) - } - if _, ok := _c.mutation.CreatedAt(); !ok { - v := backupjob.DefaultCreatedAt() - _c.mutation.SetCreatedAt(v) - } - if _, ok := _c.mutation.UpdatedAt(); !ok { - v := backupjob.DefaultUpdatedAt() - _c.mutation.SetUpdatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. -func (_c *BackupJobCreate) check() error { - if _, ok := _c.mutation.JobID(); !ok { - return &ValidationError{Name: "job_id", err: errors.New(`ent: missing required field "BackupJob.job_id"`)} - } - if _, ok := _c.mutation.BackupType(); !ok { - return &ValidationError{Name: "backup_type", err: errors.New(`ent: missing required field "BackupJob.backup_type"`)} - } - if v, ok := _c.mutation.BackupType(); ok { - if err := backupjob.BackupTypeValidator(v); err != nil { - return &ValidationError{Name: "backup_type", err: fmt.Errorf(`ent: validator failed for field "BackupJob.backup_type": %w`, err)} - } - } - if _, ok := _c.mutation.Status(); !ok { - return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "BackupJob.status"`)} - } - if v, ok := _c.mutation.Status(); ok { - if err := backupjob.StatusValidator(v); err != nil { - return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "BackupJob.status": %w`, err)} - } - } - if _, ok := _c.mutation.TriggeredBy(); !ok { - return &ValidationError{Name: "triggered_by", err: errors.New(`ent: missing required field 
"BackupJob.triggered_by"`)} - } - if _, ok := _c.mutation.UploadToS3(); !ok { - return &ValidationError{Name: "upload_to_s3", err: errors.New(`ent: missing required field "BackupJob.upload_to_s3"`)} - } - if _, ok := _c.mutation.CreatedAt(); !ok { - return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "BackupJob.created_at"`)} - } - if _, ok := _c.mutation.UpdatedAt(); !ok { - return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "BackupJob.updated_at"`)} - } - return nil -} - -func (_c *BackupJobCreate) sqlSave(ctx context.Context) (*BackupJob, error) { - if err := _c.check(); err != nil { - return nil, err - } - _node, _spec := _c.createSpec() - if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - id := _spec.ID.Value.(int64) - _node.ID = int(id) - _c.mutation.id = &_node.ID - _c.mutation.done = true - return _node, nil -} - -func (_c *BackupJobCreate) createSpec() (*BackupJob, *sqlgraph.CreateSpec) { - var ( - _node = &BackupJob{config: _c.config} - _spec = sqlgraph.NewCreateSpec(backupjob.Table, sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt)) - ) - if value, ok := _c.mutation.JobID(); ok { - _spec.SetField(backupjob.FieldJobID, field.TypeString, value) - _node.JobID = value - } - if value, ok := _c.mutation.BackupType(); ok { - _spec.SetField(backupjob.FieldBackupType, field.TypeEnum, value) - _node.BackupType = value - } - if value, ok := _c.mutation.Status(); ok { - _spec.SetField(backupjob.FieldStatus, field.TypeEnum, value) - _node.Status = value - } - if value, ok := _c.mutation.TriggeredBy(); ok { - _spec.SetField(backupjob.FieldTriggeredBy, field.TypeString, value) - _node.TriggeredBy = value - } - if value, ok := _c.mutation.IdempotencyKey(); ok { - _spec.SetField(backupjob.FieldIdempotencyKey, field.TypeString, value) - _node.IdempotencyKey 
= value - } - if value, ok := _c.mutation.UploadToS3(); ok { - _spec.SetField(backupjob.FieldUploadToS3, field.TypeBool, value) - _node.UploadToS3 = value - } - if value, ok := _c.mutation.S3ProfileID(); ok { - _spec.SetField(backupjob.FieldS3ProfileID, field.TypeString, value) - _node.S3ProfileID = value - } - if value, ok := _c.mutation.PostgresProfileID(); ok { - _spec.SetField(backupjob.FieldPostgresProfileID, field.TypeString, value) - _node.PostgresProfileID = value - } - if value, ok := _c.mutation.RedisProfileID(); ok { - _spec.SetField(backupjob.FieldRedisProfileID, field.TypeString, value) - _node.RedisProfileID = value - } - if value, ok := _c.mutation.StartedAt(); ok { - _spec.SetField(backupjob.FieldStartedAt, field.TypeTime, value) - _node.StartedAt = &value - } - if value, ok := _c.mutation.FinishedAt(); ok { - _spec.SetField(backupjob.FieldFinishedAt, field.TypeTime, value) - _node.FinishedAt = &value - } - if value, ok := _c.mutation.ErrorMessage(); ok { - _spec.SetField(backupjob.FieldErrorMessage, field.TypeString, value) - _node.ErrorMessage = value - } - if value, ok := _c.mutation.ArtifactLocalPath(); ok { - _spec.SetField(backupjob.FieldArtifactLocalPath, field.TypeString, value) - _node.ArtifactLocalPath = value - } - if value, ok := _c.mutation.ArtifactSizeBytes(); ok { - _spec.SetField(backupjob.FieldArtifactSizeBytes, field.TypeInt64, value) - _node.ArtifactSizeBytes = &value - } - if value, ok := _c.mutation.ArtifactSha256(); ok { - _spec.SetField(backupjob.FieldArtifactSha256, field.TypeString, value) - _node.ArtifactSha256 = value - } - if value, ok := _c.mutation.S3Bucket(); ok { - _spec.SetField(backupjob.FieldS3Bucket, field.TypeString, value) - _node.S3Bucket = value - } - if value, ok := _c.mutation.S3Key(); ok { - _spec.SetField(backupjob.FieldS3Key, field.TypeString, value) - _node.S3Key = value - } - if value, ok := _c.mutation.S3Etag(); ok { - _spec.SetField(backupjob.FieldS3Etag, field.TypeString, value) - _node.S3Etag = 
value - } - if value, ok := _c.mutation.CreatedAt(); ok { - _spec.SetField(backupjob.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = value - } - if value, ok := _c.mutation.UpdatedAt(); ok { - _spec.SetField(backupjob.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = value - } - if nodes := _c.mutation.EventsIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: true, - Table: backupjob.EventsTable, - Columns: []string{backupjob.EventsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges = append(_spec.Edges, edge) - } - return _node, _spec -} - -// BackupJobCreateBulk is the builder for creating many BackupJob entities in bulk. -type BackupJobCreateBulk struct { - config - err error - builders []*BackupJobCreate -} - -// Save creates the BackupJob entities in the database. -func (_c *BackupJobCreateBulk) Save(ctx context.Context) ([]*BackupJob, error) { - if _c.err != nil { - return nil, _c.err - } - specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) - nodes := make([]*BackupJob, len(_c.builders)) - mutators := make([]Mutator, len(_c.builders)) - for i := range _c.builders { - func(i int, root context.Context) { - builder := _c.builders[i] - builder.defaults() - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BackupJobMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err := builder.check(); err != nil { - return nil, err - } - builder.mutation = mutation - var err error - nodes[i], specs[i] = builder.createSpec() - if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) - } else { - spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - } - } - if err != nil { - return nil, err - } - mutation.id = &nodes[i].ID - if specs[i].ID.Value != nil { - id := specs[i].ID.Value.(int64) - nodes[i].ID = int(id) - } - mutation.done = true - return nodes[i], nil - }) - for i := len(builder.hooks) - 1; i >= 0; i-- { - mut = builder.hooks[i](mut) - } - mutators[i] = mut - }(i, ctx) - } - if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { - return nil, err - } - } - return nodes, nil -} - -// SaveX is like Save, but panics if an error occurs. -func (_c *BackupJobCreateBulk) SaveX(ctx context.Context) []*BackupJob { - v, err := _c.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. -func (_c *BackupJobCreateBulk) Exec(ctx context.Context) error { - _, err := _c.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_c *BackupJobCreateBulk) ExecX(ctx context.Context) { - if err := _c.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/backup/ent/backupjob_delete.go b/backup/ent/backupjob_delete.go deleted file mode 100644 index 8aba6a0c9..000000000 --- a/backup/ent/backupjob_delete.go +++ /dev/null @@ -1,88 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupJobDelete is the builder for deleting a BackupJob entity. -type BackupJobDelete struct { - config - hooks []Hook - mutation *BackupJobMutation -} - -// Where appends a list predicates to the BackupJobDelete builder. -func (_d *BackupJobDelete) Where(ps ...predicate.BackupJob) *BackupJobDelete { - _d.mutation.Where(ps...) 
- return _d -} - -// Exec executes the deletion query and returns how many vertices were deleted. -func (_d *BackupJobDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) -} - -// ExecX is like Exec, but panics if an error occurs. -func (_d *BackupJobDelete) ExecX(ctx context.Context) int { - n, err := _d.Exec(ctx) - if err != nil { - panic(err) - } - return n -} - -func (_d *BackupJobDelete) sqlExec(ctx context.Context) (int, error) { - _spec := sqlgraph.NewDeleteSpec(backupjob.Table, sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt)) - if ps := _d.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) - if err != nil && sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - _d.mutation.done = true - return affected, err -} - -// BackupJobDeleteOne is the builder for deleting a single BackupJob entity. -type BackupJobDeleteOne struct { - _d *BackupJobDelete -} - -// Where appends a list predicates to the BackupJobDelete builder. -func (_d *BackupJobDeleteOne) Where(ps ...predicate.BackupJob) *BackupJobDeleteOne { - _d._d.mutation.Where(ps...) - return _d -} - -// Exec executes the deletion query. -func (_d *BackupJobDeleteOne) Exec(ctx context.Context) error { - n, err := _d._d.Exec(ctx) - switch { - case err != nil: - return err - case n == 0: - return &NotFoundError{backupjob.Label} - default: - return nil - } -} - -// ExecX is like Exec, but panics if an error occurs. -func (_d *BackupJobDeleteOne) ExecX(ctx context.Context) { - if err := _d.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/backup/ent/backupjob_query.go b/backup/ent/backupjob_query.go deleted file mode 100644 index 4ec719a6d..000000000 --- a/backup/ent/backupjob_query.go +++ /dev/null @@ -1,606 +0,0 @@ -// Code generated by ent, DO NOT EDIT. 
- -package ent - -import ( - "context" - "database/sql/driver" - "fmt" - "math" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupJobQuery is the builder for querying BackupJob entities. -type BackupJobQuery struct { - config - ctx *QueryContext - order []backupjob.OrderOption - inters []Interceptor - predicates []predicate.BackupJob - withEvents *BackupJobEventQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Where adds a new predicate for the BackupJobQuery builder. -func (_q *BackupJobQuery) Where(ps ...predicate.BackupJob) *BackupJobQuery { - _q.predicates = append(_q.predicates, ps...) - return _q -} - -// Limit the number of records to be returned by this query. -func (_q *BackupJobQuery) Limit(limit int) *BackupJobQuery { - _q.ctx.Limit = &limit - return _q -} - -// Offset to start from. -func (_q *BackupJobQuery) Offset(offset int) *BackupJobQuery { - _q.ctx.Offset = &offset - return _q -} - -// Unique configures the query builder to filter duplicate records on query. -// By default, unique is set to true, and can be disabled using this method. -func (_q *BackupJobQuery) Unique(unique bool) *BackupJobQuery { - _q.ctx.Unique = &unique - return _q -} - -// Order specifies how the records should be ordered. -func (_q *BackupJobQuery) Order(o ...backupjob.OrderOption) *BackupJobQuery { - _q.order = append(_q.order, o...) - return _q -} - -// QueryEvents chains the current query on the "events" edge. 
-func (_q *BackupJobQuery) QueryEvents() *BackupJobEventQuery { - query := (&BackupJobEventClient{config: _q.config}).Query() - query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := _q.prepareQuery(ctx); err != nil { - return nil, err - } - selector := _q.sqlQuery(ctx) - if err := selector.Err(); err != nil { - return nil, err - } - step := sqlgraph.NewStep( - sqlgraph.From(backupjob.Table, backupjob.FieldID, selector), - sqlgraph.To(backupjobevent.Table, backupjobevent.FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, backupjob.EventsTable, backupjob.EventsColumn), - ) - fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) - return fromU, nil - } - return query -} - -// First returns the first BackupJob entity from the query. -// Returns a *NotFoundError when no BackupJob was found. -func (_q *BackupJobQuery) First(ctx context.Context) (*BackupJob, error) { - nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) - if err != nil { - return nil, err - } - if len(nodes) == 0 { - return nil, &NotFoundError{backupjob.Label} - } - return nodes[0], nil -} - -// FirstX is like First, but panics if an error occurs. -func (_q *BackupJobQuery) FirstX(ctx context.Context) *BackupJob { - node, err := _q.First(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return node -} - -// FirstID returns the first BackupJob ID from the query. -// Returns a *NotFoundError when no BackupJob ID was found. -func (_q *BackupJobQuery) FirstID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { - return - } - if len(ids) == 0 { - err = &NotFoundError{backupjob.Label} - return - } - return ids[0], nil -} - -// FirstIDX is like FirstID, but panics if an error occurs. 
-func (_q *BackupJobQuery) FirstIDX(ctx context.Context) int { - id, err := _q.FirstID(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return id -} - -// Only returns a single BackupJob entity found by the query, ensuring it only returns one. -// Returns a *NotSingularError when more than one BackupJob entity is found. -// Returns a *NotFoundError when no BackupJob entities are found. -func (_q *BackupJobQuery) Only(ctx context.Context) (*BackupJob, error) { - nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) - if err != nil { - return nil, err - } - switch len(nodes) { - case 1: - return nodes[0], nil - case 0: - return nil, &NotFoundError{backupjob.Label} - default: - return nil, &NotSingularError{backupjob.Label} - } -} - -// OnlyX is like Only, but panics if an error occurs. -func (_q *BackupJobQuery) OnlyX(ctx context.Context) *BackupJob { - node, err := _q.Only(ctx) - if err != nil { - panic(err) - } - return node -} - -// OnlyID is like Only, but returns the only BackupJob ID in the query. -// Returns a *NotSingularError when more than one BackupJob ID is found. -// Returns a *NotFoundError when no entities are found. -func (_q *BackupJobQuery) OnlyID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { - return - } - switch len(ids) { - case 1: - id = ids[0] - case 0: - err = &NotFoundError{backupjob.Label} - default: - err = &NotSingularError{backupjob.Label} - } - return -} - -// OnlyIDX is like OnlyID, but panics if an error occurs. -func (_q *BackupJobQuery) OnlyIDX(ctx context.Context) int { - id, err := _q.OnlyID(ctx) - if err != nil { - panic(err) - } - return id -} - -// All executes the query and returns a list of BackupJobs. 
-func (_q *BackupJobQuery) All(ctx context.Context) ([]*BackupJob, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) - if err := _q.prepareQuery(ctx); err != nil { - return nil, err - } - qr := querierAll[[]*BackupJob, *BackupJobQuery]() - return withInterceptors[[]*BackupJob](ctx, _q, qr, _q.inters) -} - -// AllX is like All, but panics if an error occurs. -func (_q *BackupJobQuery) AllX(ctx context.Context) []*BackupJob { - nodes, err := _q.All(ctx) - if err != nil { - panic(err) - } - return nodes -} - -// IDs executes the query and returns a list of BackupJob IDs. -func (_q *BackupJobQuery) IDs(ctx context.Context) (ids []int, err error) { - if _q.ctx.Unique == nil && _q.path != nil { - _q.Unique(true) - } - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) - if err = _q.Select(backupjob.FieldID).Scan(ctx, &ids); err != nil { - return nil, err - } - return ids, nil -} - -// IDsX is like IDs, but panics if an error occurs. -func (_q *BackupJobQuery) IDsX(ctx context.Context) []int { - ids, err := _q.IDs(ctx) - if err != nil { - panic(err) - } - return ids -} - -// Count returns the count of the given query. -func (_q *BackupJobQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) - if err := _q.prepareQuery(ctx); err != nil { - return 0, err - } - return withInterceptors[int](ctx, _q, querierCount[*BackupJobQuery](), _q.inters) -} - -// CountX is like Count, but panics if an error occurs. -func (_q *BackupJobQuery) CountX(ctx context.Context) int { - count, err := _q.Count(ctx) - if err != nil { - panic(err) - } - return count -} - -// Exist returns true if the query has elements in the graph. 
-func (_q *BackupJobQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) - switch _, err := _q.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - -// ExistX is like Exist, but panics if an error occurs. -func (_q *BackupJobQuery) ExistX(ctx context.Context) bool { - exist, err := _q.Exist(ctx) - if err != nil { - panic(err) - } - return exist -} - -// Clone returns a duplicate of the BackupJobQuery builder, including all associated steps. It can be -// used to prepare common query builders and use them differently after the clone is made. -func (_q *BackupJobQuery) Clone() *BackupJobQuery { - if _q == nil { - return nil - } - return &BackupJobQuery{ - config: _q.config, - ctx: _q.ctx.Clone(), - order: append([]backupjob.OrderOption{}, _q.order...), - inters: append([]Interceptor{}, _q.inters...), - predicates: append([]predicate.BackupJob{}, _q.predicates...), - withEvents: _q.withEvents.Clone(), - // clone intermediate query. - sql: _q.sql.Clone(), - path: _q.path, - } -} - -// WithEvents tells the query-builder to eager-load the nodes that are connected to -// the "events" edge. The optional arguments are used to configure the query builder of the edge. -func (_q *BackupJobQuery) WithEvents(opts ...func(*BackupJobEventQuery)) *BackupJobQuery { - query := (&BackupJobEventClient{config: _q.config}).Query() - for _, opt := range opts { - opt(query) - } - _q.withEvents = query - return _q -} - -// GroupBy is used to group vertices by one or more fields/columns. -// It is often used with aggregate functions, like: count, max, mean, min, sum. -// -// Example: -// -// var v []struct { -// JobID string `json:"job_id,omitempty"` -// Count int `json:"count,omitempty"` -// } -// -// client.BackupJob.Query(). -// GroupBy(backupjob.FieldJobID). -// Aggregate(ent.Count()). 
-// Scan(ctx, &v) -func (_q *BackupJobQuery) GroupBy(field string, fields ...string) *BackupJobGroupBy { - _q.ctx.Fields = append([]string{field}, fields...) - grbuild := &BackupJobGroupBy{build: _q} - grbuild.flds = &_q.ctx.Fields - grbuild.label = backupjob.Label - grbuild.scan = grbuild.Scan - return grbuild -} - -// Select allows the selection one or more fields/columns for the given query, -// instead of selecting all fields in the entity. -// -// Example: -// -// var v []struct { -// JobID string `json:"job_id,omitempty"` -// } -// -// client.BackupJob.Query(). -// Select(backupjob.FieldJobID). -// Scan(ctx, &v) -func (_q *BackupJobQuery) Select(fields ...string) *BackupJobSelect { - _q.ctx.Fields = append(_q.ctx.Fields, fields...) - sbuild := &BackupJobSelect{BackupJobQuery: _q} - sbuild.label = backupjob.Label - sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan - return sbuild -} - -// Aggregate returns a BackupJobSelect configured with the given aggregations. -func (_q *BackupJobQuery) Aggregate(fns ...AggregateFunc) *BackupJobSelect { - return _q.Select().Aggregate(fns...) 
-} - -func (_q *BackupJobQuery) prepareQuery(ctx context.Context) error { - for _, inter := range _q.inters { - if inter == nil { - return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") - } - if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, _q); err != nil { - return err - } - } - } - for _, f := range _q.ctx.Fields { - if !backupjob.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} - } - } - if _q.path != nil { - prev, err := _q.path(ctx) - if err != nil { - return err - } - _q.sql = prev - } - return nil -} - -func (_q *BackupJobQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BackupJob, error) { - var ( - nodes = []*BackupJob{} - _spec = _q.querySpec() - loadedTypes = [1]bool{ - _q.withEvents != nil, - } - ) - _spec.ScanValues = func(columns []string) ([]any, error) { - return (*BackupJob).scanValues(nil, columns) - } - _spec.Assign = func(columns []string, values []any) error { - node := &BackupJob{config: _q.config} - nodes = append(nodes, node) - node.Edges.loadedTypes = loadedTypes - return node.assignValues(columns, values) - } - for i := range hooks { - hooks[i](ctx, _spec) - } - if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { - return nil, err - } - if len(nodes) == 0 { - return nodes, nil - } - if query := _q.withEvents; query != nil { - if err := _q.loadEvents(ctx, query, nodes, - func(n *BackupJob) { n.Edges.Events = []*BackupJobEvent{} }, - func(n *BackupJob, e *BackupJobEvent) { n.Edges.Events = append(n.Edges.Events, e) }); err != nil { - return nil, err - } - } - return nodes, nil -} - -func (_q *BackupJobQuery) loadEvents(ctx context.Context, query *BackupJobEventQuery, nodes []*BackupJob, init func(*BackupJob), assign func(*BackupJob, *BackupJobEvent)) error { - fks := make([]driver.Value, 0, len(nodes)) - nodeids := make(map[int]*BackupJob) - for i := range nodes { - fks = append(fks, nodes[i].ID) - 
nodeids[nodes[i].ID] = nodes[i] - if init != nil { - init(nodes[i]) - } - } - if len(query.ctx.Fields) > 0 { - query.ctx.AppendFieldOnce(backupjobevent.FieldBackupJobID) - } - query.Where(predicate.BackupJobEvent(func(s *sql.Selector) { - s.Where(sql.InValues(s.C(backupjob.EventsColumn), fks...)) - })) - neighbors, err := query.All(ctx) - if err != nil { - return err - } - for _, n := range neighbors { - fk := n.BackupJobID - node, ok := nodeids[fk] - if !ok { - return fmt.Errorf(`unexpected referenced foreign-key "backup_job_id" returned %v for node %v`, fk, n.ID) - } - assign(node, n) - } - return nil -} - -func (_q *BackupJobQuery) sqlCount(ctx context.Context) (int, error) { - _spec := _q.querySpec() - _spec.Node.Columns = _q.ctx.Fields - if len(_q.ctx.Fields) > 0 { - _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique - } - return sqlgraph.CountNodes(ctx, _q.driver, _spec) -} - -func (_q *BackupJobQuery) querySpec() *sqlgraph.QuerySpec { - _spec := sqlgraph.NewQuerySpec(backupjob.Table, backupjob.Columns, sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt)) - _spec.From = _q.sql - if unique := _q.ctx.Unique; unique != nil { - _spec.Unique = *unique - } else if _q.path != nil { - _spec.Unique = true - } - if fields := _q.ctx.Fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, backupjob.FieldID) - for i := range fields { - if fields[i] != backupjob.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) - } - } - } - if ps := _q.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if limit := _q.ctx.Limit; limit != nil { - _spec.Limit = *limit - } - if offset := _q.ctx.Offset; offset != nil { - _spec.Offset = *offset - } - if ps := _q.order; len(ps) > 0 { - _spec.Order = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - return _spec -} - -func (_q 
*BackupJobQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(_q.driver.Dialect()) - t1 := builder.Table(backupjob.Table) - columns := _q.ctx.Fields - if len(columns) == 0 { - columns = backupjob.Columns - } - selector := builder.Select(t1.Columns(columns...)...).From(t1) - if _q.sql != nil { - selector = _q.sql - selector.Select(selector.Columns(columns...)...) - } - if _q.ctx.Unique != nil && *_q.ctx.Unique { - selector.Distinct() - } - for _, p := range _q.predicates { - p(selector) - } - for _, p := range _q.order { - p(selector) - } - if offset := _q.ctx.Offset; offset != nil { - // limit is mandatory for offset clause. We start - // with default value, and override it below if needed. - selector.Offset(*offset).Limit(math.MaxInt32) - } - if limit := _q.ctx.Limit; limit != nil { - selector.Limit(*limit) - } - return selector -} - -// BackupJobGroupBy is the group-by builder for BackupJob entities. -type BackupJobGroupBy struct { - selector - build *BackupJobQuery -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (_g *BackupJobGroupBy) Aggregate(fns ...AggregateFunc) *BackupJobGroupBy { - _g.fns = append(_g.fns, fns...) - return _g -} - -// Scan applies the selector query and scans the result into the given value. 
-func (_g *BackupJobGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) - if err := _g.build.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*BackupJobQuery, *BackupJobGroupBy](ctx, _g.build, _g, _g.build.inters, v) -} - -func (_g *BackupJobGroupBy) sqlScan(ctx context.Context, root *BackupJobQuery, v any) error { - selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(_g.fns)) - for _, fn := range _g.fns { - aggregation = append(aggregation, fn(selector)) - } - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) - for _, f := range *_g.flds { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - selector.GroupBy(selector.Columns(*_g.flds...)...) - if err := selector.Err(); err != nil { - return err - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} - -// BackupJobSelect is the builder for selecting fields of BackupJob entities. -type BackupJobSelect struct { - *BackupJobQuery - selector -} - -// Aggregate adds the given aggregation functions to the selector query. -func (_s *BackupJobSelect) Aggregate(fns ...AggregateFunc) *BackupJobSelect { - _s.fns = append(_s.fns, fns...) - return _s -} - -// Scan applies the selector query and scans the result into the given value. 
-func (_s *BackupJobSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) - if err := _s.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*BackupJobQuery, *BackupJobSelect](ctx, _s.BackupJobQuery, _s, _s.inters, v) -} - -func (_s *BackupJobSelect) sqlScan(ctx context.Context, root *BackupJobQuery, v any) error { - selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(_s.fns)) - for _, fn := range _s.fns { - aggregation = append(aggregation, fn(selector)) - } - switch n := len(*_s.selector.flds); { - case n == 0 && len(aggregation) > 0: - selector.Select(aggregation...) - case n != 0 && len(aggregation) > 0: - selector.AppendSelect(aggregation...) - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := _s.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} diff --git a/backup/ent/backupjob_update.go b/backup/ent/backupjob_update.go deleted file mode 100644 index 59b4230f7..000000000 --- a/backup/ent/backupjob_update.go +++ /dev/null @@ -1,1277 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupJobUpdate is the builder for updating BackupJob entities. -type BackupJobUpdate struct { - config - hooks []Hook - mutation *BackupJobMutation -} - -// Where appends a list predicates to the BackupJobUpdate builder. -func (_u *BackupJobUpdate) Where(ps ...predicate.BackupJob) *BackupJobUpdate { - _u.mutation.Where(ps...) - return _u -} - -// SetJobID sets the "job_id" field. 
-func (_u *BackupJobUpdate) SetJobID(v string) *BackupJobUpdate { - _u.mutation.SetJobID(v) - return _u -} - -// SetNillableJobID sets the "job_id" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableJobID(v *string) *BackupJobUpdate { - if v != nil { - _u.SetJobID(*v) - } - return _u -} - -// SetBackupType sets the "backup_type" field. -func (_u *BackupJobUpdate) SetBackupType(v backupjob.BackupType) *BackupJobUpdate { - _u.mutation.SetBackupType(v) - return _u -} - -// SetNillableBackupType sets the "backup_type" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableBackupType(v *backupjob.BackupType) *BackupJobUpdate { - if v != nil { - _u.SetBackupType(*v) - } - return _u -} - -// SetStatus sets the "status" field. -func (_u *BackupJobUpdate) SetStatus(v backupjob.Status) *BackupJobUpdate { - _u.mutation.SetStatus(v) - return _u -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableStatus(v *backupjob.Status) *BackupJobUpdate { - if v != nil { - _u.SetStatus(*v) - } - return _u -} - -// SetTriggeredBy sets the "triggered_by" field. -func (_u *BackupJobUpdate) SetTriggeredBy(v string) *BackupJobUpdate { - _u.mutation.SetTriggeredBy(v) - return _u -} - -// SetNillableTriggeredBy sets the "triggered_by" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableTriggeredBy(v *string) *BackupJobUpdate { - if v != nil { - _u.SetTriggeredBy(*v) - } - return _u -} - -// SetIdempotencyKey sets the "idempotency_key" field. -func (_u *BackupJobUpdate) SetIdempotencyKey(v string) *BackupJobUpdate { - _u.mutation.SetIdempotencyKey(v) - return _u -} - -// SetNillableIdempotencyKey sets the "idempotency_key" field if the given value is not nil. 
-func (_u *BackupJobUpdate) SetNillableIdempotencyKey(v *string) *BackupJobUpdate { - if v != nil { - _u.SetIdempotencyKey(*v) - } - return _u -} - -// ClearIdempotencyKey clears the value of the "idempotency_key" field. -func (_u *BackupJobUpdate) ClearIdempotencyKey() *BackupJobUpdate { - _u.mutation.ClearIdempotencyKey() - return _u -} - -// SetUploadToS3 sets the "upload_to_s3" field. -func (_u *BackupJobUpdate) SetUploadToS3(v bool) *BackupJobUpdate { - _u.mutation.SetUploadToS3(v) - return _u -} - -// SetNillableUploadToS3 sets the "upload_to_s3" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableUploadToS3(v *bool) *BackupJobUpdate { - if v != nil { - _u.SetUploadToS3(*v) - } - return _u -} - -// SetS3ProfileID sets the "s3_profile_id" field. -func (_u *BackupJobUpdate) SetS3ProfileID(v string) *BackupJobUpdate { - _u.mutation.SetS3ProfileID(v) - return _u -} - -// SetNillableS3ProfileID sets the "s3_profile_id" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableS3ProfileID(v *string) *BackupJobUpdate { - if v != nil { - _u.SetS3ProfileID(*v) - } - return _u -} - -// ClearS3ProfileID clears the value of the "s3_profile_id" field. -func (_u *BackupJobUpdate) ClearS3ProfileID() *BackupJobUpdate { - _u.mutation.ClearS3ProfileID() - return _u -} - -// SetPostgresProfileID sets the "postgres_profile_id" field. -func (_u *BackupJobUpdate) SetPostgresProfileID(v string) *BackupJobUpdate { - _u.mutation.SetPostgresProfileID(v) - return _u -} - -// SetNillablePostgresProfileID sets the "postgres_profile_id" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillablePostgresProfileID(v *string) *BackupJobUpdate { - if v != nil { - _u.SetPostgresProfileID(*v) - } - return _u -} - -// ClearPostgresProfileID clears the value of the "postgres_profile_id" field. 
-func (_u *BackupJobUpdate) ClearPostgresProfileID() *BackupJobUpdate { - _u.mutation.ClearPostgresProfileID() - return _u -} - -// SetRedisProfileID sets the "redis_profile_id" field. -func (_u *BackupJobUpdate) SetRedisProfileID(v string) *BackupJobUpdate { - _u.mutation.SetRedisProfileID(v) - return _u -} - -// SetNillableRedisProfileID sets the "redis_profile_id" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableRedisProfileID(v *string) *BackupJobUpdate { - if v != nil { - _u.SetRedisProfileID(*v) - } - return _u -} - -// ClearRedisProfileID clears the value of the "redis_profile_id" field. -func (_u *BackupJobUpdate) ClearRedisProfileID() *BackupJobUpdate { - _u.mutation.ClearRedisProfileID() - return _u -} - -// SetStartedAt sets the "started_at" field. -func (_u *BackupJobUpdate) SetStartedAt(v time.Time) *BackupJobUpdate { - _u.mutation.SetStartedAt(v) - return _u -} - -// SetNillableStartedAt sets the "started_at" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableStartedAt(v *time.Time) *BackupJobUpdate { - if v != nil { - _u.SetStartedAt(*v) - } - return _u -} - -// ClearStartedAt clears the value of the "started_at" field. -func (_u *BackupJobUpdate) ClearStartedAt() *BackupJobUpdate { - _u.mutation.ClearStartedAt() - return _u -} - -// SetFinishedAt sets the "finished_at" field. -func (_u *BackupJobUpdate) SetFinishedAt(v time.Time) *BackupJobUpdate { - _u.mutation.SetFinishedAt(v) - return _u -} - -// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableFinishedAt(v *time.Time) *BackupJobUpdate { - if v != nil { - _u.SetFinishedAt(*v) - } - return _u -} - -// ClearFinishedAt clears the value of the "finished_at" field. -func (_u *BackupJobUpdate) ClearFinishedAt() *BackupJobUpdate { - _u.mutation.ClearFinishedAt() - return _u -} - -// SetErrorMessage sets the "error_message" field. 
-func (_u *BackupJobUpdate) SetErrorMessage(v string) *BackupJobUpdate { - _u.mutation.SetErrorMessage(v) - return _u -} - -// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableErrorMessage(v *string) *BackupJobUpdate { - if v != nil { - _u.SetErrorMessage(*v) - } - return _u -} - -// ClearErrorMessage clears the value of the "error_message" field. -func (_u *BackupJobUpdate) ClearErrorMessage() *BackupJobUpdate { - _u.mutation.ClearErrorMessage() - return _u -} - -// SetArtifactLocalPath sets the "artifact_local_path" field. -func (_u *BackupJobUpdate) SetArtifactLocalPath(v string) *BackupJobUpdate { - _u.mutation.SetArtifactLocalPath(v) - return _u -} - -// SetNillableArtifactLocalPath sets the "artifact_local_path" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableArtifactLocalPath(v *string) *BackupJobUpdate { - if v != nil { - _u.SetArtifactLocalPath(*v) - } - return _u -} - -// ClearArtifactLocalPath clears the value of the "artifact_local_path" field. -func (_u *BackupJobUpdate) ClearArtifactLocalPath() *BackupJobUpdate { - _u.mutation.ClearArtifactLocalPath() - return _u -} - -// SetArtifactSizeBytes sets the "artifact_size_bytes" field. -func (_u *BackupJobUpdate) SetArtifactSizeBytes(v int64) *BackupJobUpdate { - _u.mutation.ResetArtifactSizeBytes() - _u.mutation.SetArtifactSizeBytes(v) - return _u -} - -// SetNillableArtifactSizeBytes sets the "artifact_size_bytes" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableArtifactSizeBytes(v *int64) *BackupJobUpdate { - if v != nil { - _u.SetArtifactSizeBytes(*v) - } - return _u -} - -// AddArtifactSizeBytes adds value to the "artifact_size_bytes" field. -func (_u *BackupJobUpdate) AddArtifactSizeBytes(v int64) *BackupJobUpdate { - _u.mutation.AddArtifactSizeBytes(v) - return _u -} - -// ClearArtifactSizeBytes clears the value of the "artifact_size_bytes" field. 
-func (_u *BackupJobUpdate) ClearArtifactSizeBytes() *BackupJobUpdate { - _u.mutation.ClearArtifactSizeBytes() - return _u -} - -// SetArtifactSha256 sets the "artifact_sha256" field. -func (_u *BackupJobUpdate) SetArtifactSha256(v string) *BackupJobUpdate { - _u.mutation.SetArtifactSha256(v) - return _u -} - -// SetNillableArtifactSha256 sets the "artifact_sha256" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableArtifactSha256(v *string) *BackupJobUpdate { - if v != nil { - _u.SetArtifactSha256(*v) - } - return _u -} - -// ClearArtifactSha256 clears the value of the "artifact_sha256" field. -func (_u *BackupJobUpdate) ClearArtifactSha256() *BackupJobUpdate { - _u.mutation.ClearArtifactSha256() - return _u -} - -// SetS3Bucket sets the "s3_bucket" field. -func (_u *BackupJobUpdate) SetS3Bucket(v string) *BackupJobUpdate { - _u.mutation.SetS3Bucket(v) - return _u -} - -// SetNillableS3Bucket sets the "s3_bucket" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableS3Bucket(v *string) *BackupJobUpdate { - if v != nil { - _u.SetS3Bucket(*v) - } - return _u -} - -// ClearS3Bucket clears the value of the "s3_bucket" field. -func (_u *BackupJobUpdate) ClearS3Bucket() *BackupJobUpdate { - _u.mutation.ClearS3Bucket() - return _u -} - -// SetS3Key sets the "s3_key" field. -func (_u *BackupJobUpdate) SetS3Key(v string) *BackupJobUpdate { - _u.mutation.SetS3Key(v) - return _u -} - -// SetNillableS3Key sets the "s3_key" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableS3Key(v *string) *BackupJobUpdate { - if v != nil { - _u.SetS3Key(*v) - } - return _u -} - -// ClearS3Key clears the value of the "s3_key" field. -func (_u *BackupJobUpdate) ClearS3Key() *BackupJobUpdate { - _u.mutation.ClearS3Key() - return _u -} - -// SetS3Etag sets the "s3_etag" field. 
-func (_u *BackupJobUpdate) SetS3Etag(v string) *BackupJobUpdate { - _u.mutation.SetS3Etag(v) - return _u -} - -// SetNillableS3Etag sets the "s3_etag" field if the given value is not nil. -func (_u *BackupJobUpdate) SetNillableS3Etag(v *string) *BackupJobUpdate { - if v != nil { - _u.SetS3Etag(*v) - } - return _u -} - -// ClearS3Etag clears the value of the "s3_etag" field. -func (_u *BackupJobUpdate) ClearS3Etag() *BackupJobUpdate { - _u.mutation.ClearS3Etag() - return _u -} - -// SetUpdatedAt sets the "updated_at" field. -func (_u *BackupJobUpdate) SetUpdatedAt(v time.Time) *BackupJobUpdate { - _u.mutation.SetUpdatedAt(v) - return _u -} - -// AddEventIDs adds the "events" edge to the BackupJobEvent entity by IDs. -func (_u *BackupJobUpdate) AddEventIDs(ids ...int) *BackupJobUpdate { - _u.mutation.AddEventIDs(ids...) - return _u -} - -// AddEvents adds the "events" edges to the BackupJobEvent entity. -func (_u *BackupJobUpdate) AddEvents(v ...*BackupJobEvent) *BackupJobUpdate { - ids := make([]int, len(v)) - for i := range v { - ids[i] = v[i].ID - } - return _u.AddEventIDs(ids...) -} - -// Mutation returns the BackupJobMutation object of the builder. -func (_u *BackupJobUpdate) Mutation() *BackupJobMutation { - return _u.mutation -} - -// ClearEvents clears all "events" edges to the BackupJobEvent entity. -func (_u *BackupJobUpdate) ClearEvents() *BackupJobUpdate { - _u.mutation.ClearEvents() - return _u -} - -// RemoveEventIDs removes the "events" edge to BackupJobEvent entities by IDs. -func (_u *BackupJobUpdate) RemoveEventIDs(ids ...int) *BackupJobUpdate { - _u.mutation.RemoveEventIDs(ids...) - return _u -} - -// RemoveEvents removes "events" edges to BackupJobEvent entities. -func (_u *BackupJobUpdate) RemoveEvents(v ...*BackupJobEvent) *BackupJobUpdate { - ids := make([]int, len(v)) - for i := range v { - ids[i] = v[i].ID - } - return _u.RemoveEventIDs(ids...) -} - -// Save executes the query and returns the number of nodes affected by the update operation. 
-func (_u *BackupJobUpdate) Save(ctx context.Context) (int, error) { - _u.defaults() - return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (_u *BackupJobUpdate) SaveX(ctx context.Context) int { - affected, err := _u.Save(ctx) - if err != nil { - panic(err) - } - return affected -} - -// Exec executes the query. -func (_u *BackupJobUpdate) Exec(ctx context.Context) error { - _, err := _u.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_u *BackupJobUpdate) ExecX(ctx context.Context) { - if err := _u.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_u *BackupJobUpdate) defaults() { - if _, ok := _u.mutation.UpdatedAt(); !ok { - v := backupjob.UpdateDefaultUpdatedAt() - _u.mutation.SetUpdatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. -func (_u *BackupJobUpdate) check() error { - if v, ok := _u.mutation.BackupType(); ok { - if err := backupjob.BackupTypeValidator(v); err != nil { - return &ValidationError{Name: "backup_type", err: fmt.Errorf(`ent: validator failed for field "BackupJob.backup_type": %w`, err)} - } - } - if v, ok := _u.mutation.Status(); ok { - if err := backupjob.StatusValidator(v); err != nil { - return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "BackupJob.status": %w`, err)} - } - } - return nil -} - -func (_u *BackupJobUpdate) sqlSave(ctx context.Context) (_node int, err error) { - if err := _u.check(); err != nil { - return _node, err - } - _spec := sqlgraph.NewUpdateSpec(backupjob.Table, backupjob.Columns, sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt)) - if ps := _u.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := _u.mutation.JobID(); ok { - 
_spec.SetField(backupjob.FieldJobID, field.TypeString, value) - } - if value, ok := _u.mutation.BackupType(); ok { - _spec.SetField(backupjob.FieldBackupType, field.TypeEnum, value) - } - if value, ok := _u.mutation.Status(); ok { - _spec.SetField(backupjob.FieldStatus, field.TypeEnum, value) - } - if value, ok := _u.mutation.TriggeredBy(); ok { - _spec.SetField(backupjob.FieldTriggeredBy, field.TypeString, value) - } - if value, ok := _u.mutation.IdempotencyKey(); ok { - _spec.SetField(backupjob.FieldIdempotencyKey, field.TypeString, value) - } - if _u.mutation.IdempotencyKeyCleared() { - _spec.ClearField(backupjob.FieldIdempotencyKey, field.TypeString) - } - if value, ok := _u.mutation.UploadToS3(); ok { - _spec.SetField(backupjob.FieldUploadToS3, field.TypeBool, value) - } - if value, ok := _u.mutation.S3ProfileID(); ok { - _spec.SetField(backupjob.FieldS3ProfileID, field.TypeString, value) - } - if _u.mutation.S3ProfileIDCleared() { - _spec.ClearField(backupjob.FieldS3ProfileID, field.TypeString) - } - if value, ok := _u.mutation.PostgresProfileID(); ok { - _spec.SetField(backupjob.FieldPostgresProfileID, field.TypeString, value) - } - if _u.mutation.PostgresProfileIDCleared() { - _spec.ClearField(backupjob.FieldPostgresProfileID, field.TypeString) - } - if value, ok := _u.mutation.RedisProfileID(); ok { - _spec.SetField(backupjob.FieldRedisProfileID, field.TypeString, value) - } - if _u.mutation.RedisProfileIDCleared() { - _spec.ClearField(backupjob.FieldRedisProfileID, field.TypeString) - } - if value, ok := _u.mutation.StartedAt(); ok { - _spec.SetField(backupjob.FieldStartedAt, field.TypeTime, value) - } - if _u.mutation.StartedAtCleared() { - _spec.ClearField(backupjob.FieldStartedAt, field.TypeTime) - } - if value, ok := _u.mutation.FinishedAt(); ok { - _spec.SetField(backupjob.FieldFinishedAt, field.TypeTime, value) - } - if _u.mutation.FinishedAtCleared() { - _spec.ClearField(backupjob.FieldFinishedAt, field.TypeTime) - } - if value, ok := 
_u.mutation.ErrorMessage(); ok { - _spec.SetField(backupjob.FieldErrorMessage, field.TypeString, value) - } - if _u.mutation.ErrorMessageCleared() { - _spec.ClearField(backupjob.FieldErrorMessage, field.TypeString) - } - if value, ok := _u.mutation.ArtifactLocalPath(); ok { - _spec.SetField(backupjob.FieldArtifactLocalPath, field.TypeString, value) - } - if _u.mutation.ArtifactLocalPathCleared() { - _spec.ClearField(backupjob.FieldArtifactLocalPath, field.TypeString) - } - if value, ok := _u.mutation.ArtifactSizeBytes(); ok { - _spec.SetField(backupjob.FieldArtifactSizeBytes, field.TypeInt64, value) - } - if value, ok := _u.mutation.AddedArtifactSizeBytes(); ok { - _spec.AddField(backupjob.FieldArtifactSizeBytes, field.TypeInt64, value) - } - if _u.mutation.ArtifactSizeBytesCleared() { - _spec.ClearField(backupjob.FieldArtifactSizeBytes, field.TypeInt64) - } - if value, ok := _u.mutation.ArtifactSha256(); ok { - _spec.SetField(backupjob.FieldArtifactSha256, field.TypeString, value) - } - if _u.mutation.ArtifactSha256Cleared() { - _spec.ClearField(backupjob.FieldArtifactSha256, field.TypeString) - } - if value, ok := _u.mutation.S3Bucket(); ok { - _spec.SetField(backupjob.FieldS3Bucket, field.TypeString, value) - } - if _u.mutation.S3BucketCleared() { - _spec.ClearField(backupjob.FieldS3Bucket, field.TypeString) - } - if value, ok := _u.mutation.S3Key(); ok { - _spec.SetField(backupjob.FieldS3Key, field.TypeString, value) - } - if _u.mutation.S3KeyCleared() { - _spec.ClearField(backupjob.FieldS3Key, field.TypeString) - } - if value, ok := _u.mutation.S3Etag(); ok { - _spec.SetField(backupjob.FieldS3Etag, field.TypeString, value) - } - if _u.mutation.S3EtagCleared() { - _spec.ClearField(backupjob.FieldS3Etag, field.TypeString) - } - if value, ok := _u.mutation.UpdatedAt(); ok { - _spec.SetField(backupjob.FieldUpdatedAt, field.TypeTime, value) - } - if _u.mutation.EventsCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: true, - Table: 
backupjob.EventsTable, - Columns: []string{backupjob.EventsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := _u.mutation.RemovedEventsIDs(); len(nodes) > 0 && !_u.mutation.EventsCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: true, - Table: backupjob.EventsTable, - Columns: []string{backupjob.EventsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := _u.mutation.EventsIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: true, - Table: backupjob.EventsTable, - Columns: []string{backupjob.EventsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{backupjob.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return 0, err - } - _u.mutation.done = true - return _node, nil -} - -// BackupJobUpdateOne is the builder for updating a single BackupJob entity. -type BackupJobUpdateOne struct { - config - fields []string - hooks []Hook - mutation *BackupJobMutation -} - -// SetJobID sets the "job_id" field. -func (_u *BackupJobUpdateOne) SetJobID(v string) *BackupJobUpdateOne { - _u.mutation.SetJobID(v) - return _u -} - -// SetNillableJobID sets the "job_id" field if the given value is not nil. 
-func (_u *BackupJobUpdateOne) SetNillableJobID(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetJobID(*v) - } - return _u -} - -// SetBackupType sets the "backup_type" field. -func (_u *BackupJobUpdateOne) SetBackupType(v backupjob.BackupType) *BackupJobUpdateOne { - _u.mutation.SetBackupType(v) - return _u -} - -// SetNillableBackupType sets the "backup_type" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableBackupType(v *backupjob.BackupType) *BackupJobUpdateOne { - if v != nil { - _u.SetBackupType(*v) - } - return _u -} - -// SetStatus sets the "status" field. -func (_u *BackupJobUpdateOne) SetStatus(v backupjob.Status) *BackupJobUpdateOne { - _u.mutation.SetStatus(v) - return _u -} - -// SetNillableStatus sets the "status" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableStatus(v *backupjob.Status) *BackupJobUpdateOne { - if v != nil { - _u.SetStatus(*v) - } - return _u -} - -// SetTriggeredBy sets the "triggered_by" field. -func (_u *BackupJobUpdateOne) SetTriggeredBy(v string) *BackupJobUpdateOne { - _u.mutation.SetTriggeredBy(v) - return _u -} - -// SetNillableTriggeredBy sets the "triggered_by" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableTriggeredBy(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetTriggeredBy(*v) - } - return _u -} - -// SetIdempotencyKey sets the "idempotency_key" field. -func (_u *BackupJobUpdateOne) SetIdempotencyKey(v string) *BackupJobUpdateOne { - _u.mutation.SetIdempotencyKey(v) - return _u -} - -// SetNillableIdempotencyKey sets the "idempotency_key" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableIdempotencyKey(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetIdempotencyKey(*v) - } - return _u -} - -// ClearIdempotencyKey clears the value of the "idempotency_key" field. 
-func (_u *BackupJobUpdateOne) ClearIdempotencyKey() *BackupJobUpdateOne { - _u.mutation.ClearIdempotencyKey() - return _u -} - -// SetUploadToS3 sets the "upload_to_s3" field. -func (_u *BackupJobUpdateOne) SetUploadToS3(v bool) *BackupJobUpdateOne { - _u.mutation.SetUploadToS3(v) - return _u -} - -// SetNillableUploadToS3 sets the "upload_to_s3" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableUploadToS3(v *bool) *BackupJobUpdateOne { - if v != nil { - _u.SetUploadToS3(*v) - } - return _u -} - -// SetS3ProfileID sets the "s3_profile_id" field. -func (_u *BackupJobUpdateOne) SetS3ProfileID(v string) *BackupJobUpdateOne { - _u.mutation.SetS3ProfileID(v) - return _u -} - -// SetNillableS3ProfileID sets the "s3_profile_id" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableS3ProfileID(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetS3ProfileID(*v) - } - return _u -} - -// ClearS3ProfileID clears the value of the "s3_profile_id" field. -func (_u *BackupJobUpdateOne) ClearS3ProfileID() *BackupJobUpdateOne { - _u.mutation.ClearS3ProfileID() - return _u -} - -// SetPostgresProfileID sets the "postgres_profile_id" field. -func (_u *BackupJobUpdateOne) SetPostgresProfileID(v string) *BackupJobUpdateOne { - _u.mutation.SetPostgresProfileID(v) - return _u -} - -// SetNillablePostgresProfileID sets the "postgres_profile_id" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillablePostgresProfileID(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetPostgresProfileID(*v) - } - return _u -} - -// ClearPostgresProfileID clears the value of the "postgres_profile_id" field. -func (_u *BackupJobUpdateOne) ClearPostgresProfileID() *BackupJobUpdateOne { - _u.mutation.ClearPostgresProfileID() - return _u -} - -// SetRedisProfileID sets the "redis_profile_id" field. 
-func (_u *BackupJobUpdateOne) SetRedisProfileID(v string) *BackupJobUpdateOne { - _u.mutation.SetRedisProfileID(v) - return _u -} - -// SetNillableRedisProfileID sets the "redis_profile_id" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableRedisProfileID(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetRedisProfileID(*v) - } - return _u -} - -// ClearRedisProfileID clears the value of the "redis_profile_id" field. -func (_u *BackupJobUpdateOne) ClearRedisProfileID() *BackupJobUpdateOne { - _u.mutation.ClearRedisProfileID() - return _u -} - -// SetStartedAt sets the "started_at" field. -func (_u *BackupJobUpdateOne) SetStartedAt(v time.Time) *BackupJobUpdateOne { - _u.mutation.SetStartedAt(v) - return _u -} - -// SetNillableStartedAt sets the "started_at" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableStartedAt(v *time.Time) *BackupJobUpdateOne { - if v != nil { - _u.SetStartedAt(*v) - } - return _u -} - -// ClearStartedAt clears the value of the "started_at" field. -func (_u *BackupJobUpdateOne) ClearStartedAt() *BackupJobUpdateOne { - _u.mutation.ClearStartedAt() - return _u -} - -// SetFinishedAt sets the "finished_at" field. -func (_u *BackupJobUpdateOne) SetFinishedAt(v time.Time) *BackupJobUpdateOne { - _u.mutation.SetFinishedAt(v) - return _u -} - -// SetNillableFinishedAt sets the "finished_at" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableFinishedAt(v *time.Time) *BackupJobUpdateOne { - if v != nil { - _u.SetFinishedAt(*v) - } - return _u -} - -// ClearFinishedAt clears the value of the "finished_at" field. -func (_u *BackupJobUpdateOne) ClearFinishedAt() *BackupJobUpdateOne { - _u.mutation.ClearFinishedAt() - return _u -} - -// SetErrorMessage sets the "error_message" field. 
-func (_u *BackupJobUpdateOne) SetErrorMessage(v string) *BackupJobUpdateOne { - _u.mutation.SetErrorMessage(v) - return _u -} - -// SetNillableErrorMessage sets the "error_message" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableErrorMessage(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetErrorMessage(*v) - } - return _u -} - -// ClearErrorMessage clears the value of the "error_message" field. -func (_u *BackupJobUpdateOne) ClearErrorMessage() *BackupJobUpdateOne { - _u.mutation.ClearErrorMessage() - return _u -} - -// SetArtifactLocalPath sets the "artifact_local_path" field. -func (_u *BackupJobUpdateOne) SetArtifactLocalPath(v string) *BackupJobUpdateOne { - _u.mutation.SetArtifactLocalPath(v) - return _u -} - -// SetNillableArtifactLocalPath sets the "artifact_local_path" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableArtifactLocalPath(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetArtifactLocalPath(*v) - } - return _u -} - -// ClearArtifactLocalPath clears the value of the "artifact_local_path" field. -func (_u *BackupJobUpdateOne) ClearArtifactLocalPath() *BackupJobUpdateOne { - _u.mutation.ClearArtifactLocalPath() - return _u -} - -// SetArtifactSizeBytes sets the "artifact_size_bytes" field. -func (_u *BackupJobUpdateOne) SetArtifactSizeBytes(v int64) *BackupJobUpdateOne { - _u.mutation.ResetArtifactSizeBytes() - _u.mutation.SetArtifactSizeBytes(v) - return _u -} - -// SetNillableArtifactSizeBytes sets the "artifact_size_bytes" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableArtifactSizeBytes(v *int64) *BackupJobUpdateOne { - if v != nil { - _u.SetArtifactSizeBytes(*v) - } - return _u -} - -// AddArtifactSizeBytes adds value to the "artifact_size_bytes" field. 
-func (_u *BackupJobUpdateOne) AddArtifactSizeBytes(v int64) *BackupJobUpdateOne { - _u.mutation.AddArtifactSizeBytes(v) - return _u -} - -// ClearArtifactSizeBytes clears the value of the "artifact_size_bytes" field. -func (_u *BackupJobUpdateOne) ClearArtifactSizeBytes() *BackupJobUpdateOne { - _u.mutation.ClearArtifactSizeBytes() - return _u -} - -// SetArtifactSha256 sets the "artifact_sha256" field. -func (_u *BackupJobUpdateOne) SetArtifactSha256(v string) *BackupJobUpdateOne { - _u.mutation.SetArtifactSha256(v) - return _u -} - -// SetNillableArtifactSha256 sets the "artifact_sha256" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableArtifactSha256(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetArtifactSha256(*v) - } - return _u -} - -// ClearArtifactSha256 clears the value of the "artifact_sha256" field. -func (_u *BackupJobUpdateOne) ClearArtifactSha256() *BackupJobUpdateOne { - _u.mutation.ClearArtifactSha256() - return _u -} - -// SetS3Bucket sets the "s3_bucket" field. -func (_u *BackupJobUpdateOne) SetS3Bucket(v string) *BackupJobUpdateOne { - _u.mutation.SetS3Bucket(v) - return _u -} - -// SetNillableS3Bucket sets the "s3_bucket" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableS3Bucket(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetS3Bucket(*v) - } - return _u -} - -// ClearS3Bucket clears the value of the "s3_bucket" field. -func (_u *BackupJobUpdateOne) ClearS3Bucket() *BackupJobUpdateOne { - _u.mutation.ClearS3Bucket() - return _u -} - -// SetS3Key sets the "s3_key" field. -func (_u *BackupJobUpdateOne) SetS3Key(v string) *BackupJobUpdateOne { - _u.mutation.SetS3Key(v) - return _u -} - -// SetNillableS3Key sets the "s3_key" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableS3Key(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetS3Key(*v) - } - return _u -} - -// ClearS3Key clears the value of the "s3_key" field. 
-func (_u *BackupJobUpdateOne) ClearS3Key() *BackupJobUpdateOne { - _u.mutation.ClearS3Key() - return _u -} - -// SetS3Etag sets the "s3_etag" field. -func (_u *BackupJobUpdateOne) SetS3Etag(v string) *BackupJobUpdateOne { - _u.mutation.SetS3Etag(v) - return _u -} - -// SetNillableS3Etag sets the "s3_etag" field if the given value is not nil. -func (_u *BackupJobUpdateOne) SetNillableS3Etag(v *string) *BackupJobUpdateOne { - if v != nil { - _u.SetS3Etag(*v) - } - return _u -} - -// ClearS3Etag clears the value of the "s3_etag" field. -func (_u *BackupJobUpdateOne) ClearS3Etag() *BackupJobUpdateOne { - _u.mutation.ClearS3Etag() - return _u -} - -// SetUpdatedAt sets the "updated_at" field. -func (_u *BackupJobUpdateOne) SetUpdatedAt(v time.Time) *BackupJobUpdateOne { - _u.mutation.SetUpdatedAt(v) - return _u -} - -// AddEventIDs adds the "events" edge to the BackupJobEvent entity by IDs. -func (_u *BackupJobUpdateOne) AddEventIDs(ids ...int) *BackupJobUpdateOne { - _u.mutation.AddEventIDs(ids...) - return _u -} - -// AddEvents adds the "events" edges to the BackupJobEvent entity. -func (_u *BackupJobUpdateOne) AddEvents(v ...*BackupJobEvent) *BackupJobUpdateOne { - ids := make([]int, len(v)) - for i := range v { - ids[i] = v[i].ID - } - return _u.AddEventIDs(ids...) -} - -// Mutation returns the BackupJobMutation object of the builder. -func (_u *BackupJobUpdateOne) Mutation() *BackupJobMutation { - return _u.mutation -} - -// ClearEvents clears all "events" edges to the BackupJobEvent entity. -func (_u *BackupJobUpdateOne) ClearEvents() *BackupJobUpdateOne { - _u.mutation.ClearEvents() - return _u -} - -// RemoveEventIDs removes the "events" edge to BackupJobEvent entities by IDs. -func (_u *BackupJobUpdateOne) RemoveEventIDs(ids ...int) *BackupJobUpdateOne { - _u.mutation.RemoveEventIDs(ids...) - return _u -} - -// RemoveEvents removes "events" edges to BackupJobEvent entities. 
-func (_u *BackupJobUpdateOne) RemoveEvents(v ...*BackupJobEvent) *BackupJobUpdateOne { - ids := make([]int, len(v)) - for i := range v { - ids[i] = v[i].ID - } - return _u.RemoveEventIDs(ids...) -} - -// Where appends a list predicates to the BackupJobUpdate builder. -func (_u *BackupJobUpdateOne) Where(ps ...predicate.BackupJob) *BackupJobUpdateOne { - _u.mutation.Where(ps...) - return _u -} - -// Select allows selecting one or more fields (columns) of the returned entity. -// The default is selecting all fields defined in the entity schema. -func (_u *BackupJobUpdateOne) Select(field string, fields ...string) *BackupJobUpdateOne { - _u.fields = append([]string{field}, fields...) - return _u -} - -// Save executes the query and returns the updated BackupJob entity. -func (_u *BackupJobUpdateOne) Save(ctx context.Context) (*BackupJob, error) { - _u.defaults() - return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (_u *BackupJobUpdateOne) SaveX(ctx context.Context) *BackupJob { - node, err := _u.Save(ctx) - if err != nil { - panic(err) - } - return node -} - -// Exec executes the query on the entity. -func (_u *BackupJobUpdateOne) Exec(ctx context.Context) error { - _, err := _u.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_u *BackupJobUpdateOne) ExecX(ctx context.Context) { - if err := _u.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_u *BackupJobUpdateOne) defaults() { - if _, ok := _u.mutation.UpdatedAt(); !ok { - v := backupjob.UpdateDefaultUpdatedAt() - _u.mutation.SetUpdatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. 
-func (_u *BackupJobUpdateOne) check() error { - if v, ok := _u.mutation.BackupType(); ok { - if err := backupjob.BackupTypeValidator(v); err != nil { - return &ValidationError{Name: "backup_type", err: fmt.Errorf(`ent: validator failed for field "BackupJob.backup_type": %w`, err)} - } - } - if v, ok := _u.mutation.Status(); ok { - if err := backupjob.StatusValidator(v); err != nil { - return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "BackupJob.status": %w`, err)} - } - } - return nil -} - -func (_u *BackupJobUpdateOne) sqlSave(ctx context.Context) (_node *BackupJob, err error) { - if err := _u.check(); err != nil { - return _node, err - } - _spec := sqlgraph.NewUpdateSpec(backupjob.Table, backupjob.Columns, sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt)) - id, ok := _u.mutation.ID() - if !ok { - return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BackupJob.id" for update`)} - } - _spec.Node.ID.Value = id - if fields := _u.fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, backupjob.FieldID) - for _, f := range fields { - if !backupjob.ValidColumn(f) { - return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} - } - if f != backupjob.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, f) - } - } - } - if ps := _u.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := _u.mutation.JobID(); ok { - _spec.SetField(backupjob.FieldJobID, field.TypeString, value) - } - if value, ok := _u.mutation.BackupType(); ok { - _spec.SetField(backupjob.FieldBackupType, field.TypeEnum, value) - } - if value, ok := _u.mutation.Status(); ok { - _spec.SetField(backupjob.FieldStatus, field.TypeEnum, value) - } - if value, ok := _u.mutation.TriggeredBy(); ok { - 
_spec.SetField(backupjob.FieldTriggeredBy, field.TypeString, value) - } - if value, ok := _u.mutation.IdempotencyKey(); ok { - _spec.SetField(backupjob.FieldIdempotencyKey, field.TypeString, value) - } - if _u.mutation.IdempotencyKeyCleared() { - _spec.ClearField(backupjob.FieldIdempotencyKey, field.TypeString) - } - if value, ok := _u.mutation.UploadToS3(); ok { - _spec.SetField(backupjob.FieldUploadToS3, field.TypeBool, value) - } - if value, ok := _u.mutation.S3ProfileID(); ok { - _spec.SetField(backupjob.FieldS3ProfileID, field.TypeString, value) - } - if _u.mutation.S3ProfileIDCleared() { - _spec.ClearField(backupjob.FieldS3ProfileID, field.TypeString) - } - if value, ok := _u.mutation.PostgresProfileID(); ok { - _spec.SetField(backupjob.FieldPostgresProfileID, field.TypeString, value) - } - if _u.mutation.PostgresProfileIDCleared() { - _spec.ClearField(backupjob.FieldPostgresProfileID, field.TypeString) - } - if value, ok := _u.mutation.RedisProfileID(); ok { - _spec.SetField(backupjob.FieldRedisProfileID, field.TypeString, value) - } - if _u.mutation.RedisProfileIDCleared() { - _spec.ClearField(backupjob.FieldRedisProfileID, field.TypeString) - } - if value, ok := _u.mutation.StartedAt(); ok { - _spec.SetField(backupjob.FieldStartedAt, field.TypeTime, value) - } - if _u.mutation.StartedAtCleared() { - _spec.ClearField(backupjob.FieldStartedAt, field.TypeTime) - } - if value, ok := _u.mutation.FinishedAt(); ok { - _spec.SetField(backupjob.FieldFinishedAt, field.TypeTime, value) - } - if _u.mutation.FinishedAtCleared() { - _spec.ClearField(backupjob.FieldFinishedAt, field.TypeTime) - } - if value, ok := _u.mutation.ErrorMessage(); ok { - _spec.SetField(backupjob.FieldErrorMessage, field.TypeString, value) - } - if _u.mutation.ErrorMessageCleared() { - _spec.ClearField(backupjob.FieldErrorMessage, field.TypeString) - } - if value, ok := _u.mutation.ArtifactLocalPath(); ok { - _spec.SetField(backupjob.FieldArtifactLocalPath, field.TypeString, value) - } - if 
_u.mutation.ArtifactLocalPathCleared() { - _spec.ClearField(backupjob.FieldArtifactLocalPath, field.TypeString) - } - if value, ok := _u.mutation.ArtifactSizeBytes(); ok { - _spec.SetField(backupjob.FieldArtifactSizeBytes, field.TypeInt64, value) - } - if value, ok := _u.mutation.AddedArtifactSizeBytes(); ok { - _spec.AddField(backupjob.FieldArtifactSizeBytes, field.TypeInt64, value) - } - if _u.mutation.ArtifactSizeBytesCleared() { - _spec.ClearField(backupjob.FieldArtifactSizeBytes, field.TypeInt64) - } - if value, ok := _u.mutation.ArtifactSha256(); ok { - _spec.SetField(backupjob.FieldArtifactSha256, field.TypeString, value) - } - if _u.mutation.ArtifactSha256Cleared() { - _spec.ClearField(backupjob.FieldArtifactSha256, field.TypeString) - } - if value, ok := _u.mutation.S3Bucket(); ok { - _spec.SetField(backupjob.FieldS3Bucket, field.TypeString, value) - } - if _u.mutation.S3BucketCleared() { - _spec.ClearField(backupjob.FieldS3Bucket, field.TypeString) - } - if value, ok := _u.mutation.S3Key(); ok { - _spec.SetField(backupjob.FieldS3Key, field.TypeString, value) - } - if _u.mutation.S3KeyCleared() { - _spec.ClearField(backupjob.FieldS3Key, field.TypeString) - } - if value, ok := _u.mutation.S3Etag(); ok { - _spec.SetField(backupjob.FieldS3Etag, field.TypeString, value) - } - if _u.mutation.S3EtagCleared() { - _spec.ClearField(backupjob.FieldS3Etag, field.TypeString) - } - if value, ok := _u.mutation.UpdatedAt(); ok { - _spec.SetField(backupjob.FieldUpdatedAt, field.TypeTime, value) - } - if _u.mutation.EventsCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: true, - Table: backupjob.EventsTable, - Columns: []string{backupjob.EventsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := _u.mutation.RemovedEventsIDs(); len(nodes) > 0 && !_u.mutation.EventsCleared() { - edge := 
&sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: true, - Table: backupjob.EventsTable, - Columns: []string{backupjob.EventsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := _u.mutation.EventsIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.O2M, - Inverse: true, - Table: backupjob.EventsTable, - Columns: []string{backupjob.EventsColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - _node = &BackupJob{config: _u.config} - _spec.Assign = _node.assignValues - _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{backupjob.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - _u.mutation.done = true - return _node, nil -} diff --git a/backup/ent/backupjobevent.go b/backup/ent/backupjobevent.go deleted file mode 100644 index 380a820fc..000000000 --- a/backup/ent/backupjobevent.go +++ /dev/null @@ -1,201 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "fmt" - "strings" - "time" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" -) - -// BackupJobEvent is the model entity for the BackupJobEvent schema. -type BackupJobEvent struct { - config `json:"-"` - // ID of the ent. 
- ID int `json:"id,omitempty"` - // BackupJobID holds the value of the "backup_job_id" field. - BackupJobID int `json:"backup_job_id,omitempty"` - // Level holds the value of the "level" field. - Level backupjobevent.Level `json:"level,omitempty"` - // EventType holds the value of the "event_type" field. - EventType string `json:"event_type,omitempty"` - // Message holds the value of the "message" field. - Message string `json:"message,omitempty"` - // Payload holds the value of the "payload" field. - Payload string `json:"payload,omitempty"` - // EventTime holds the value of the "event_time" field. - EventTime time.Time `json:"event_time,omitempty"` - // CreatedAt holds the value of the "created_at" field. - CreatedAt time.Time `json:"created_at,omitempty"` - // Edges holds the relations/edges for other nodes in the graph. - // The values are being populated by the BackupJobEventQuery when eager-loading is set. - Edges BackupJobEventEdges `json:"edges"` - selectValues sql.SelectValues -} - -// BackupJobEventEdges holds the relations/edges for other nodes in the graph. -type BackupJobEventEdges struct { - // Job holds the value of the job edge. - Job *BackupJob `json:"job,omitempty"` - // loadedTypes holds the information for reporting if a - // type was loaded (or requested) in eager-loading or not. - loadedTypes [1]bool -} - -// JobOrErr returns the Job value or an error if the edge -// was not loaded in eager-loading, or loaded but was not found. -func (e BackupJobEventEdges) JobOrErr() (*BackupJob, error) { - if e.Job != nil { - return e.Job, nil - } else if e.loadedTypes[0] { - return nil, &NotFoundError{label: backupjob.Label} - } - return nil, &NotLoadedError{edge: "job"} -} - -// scanValues returns the types for scanning values from sql.Rows. 
-func (*BackupJobEvent) scanValues(columns []string) ([]any, error) { - values := make([]any, len(columns)) - for i := range columns { - switch columns[i] { - case backupjobevent.FieldID, backupjobevent.FieldBackupJobID: - values[i] = new(sql.NullInt64) - case backupjobevent.FieldLevel, backupjobevent.FieldEventType, backupjobevent.FieldMessage, backupjobevent.FieldPayload: - values[i] = new(sql.NullString) - case backupjobevent.FieldEventTime, backupjobevent.FieldCreatedAt: - values[i] = new(sql.NullTime) - default: - values[i] = new(sql.UnknownType) - } - } - return values, nil -} - -// assignValues assigns the values that were returned from sql.Rows (after scanning) -// to the BackupJobEvent fields. -func (_m *BackupJobEvent) assignValues(columns []string, values []any) error { - if m, n := len(values), len(columns); m < n { - return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) - } - for i := range columns { - switch columns[i] { - case backupjobevent.FieldID: - value, ok := values[i].(*sql.NullInt64) - if !ok { - return fmt.Errorf("unexpected type %T for field id", value) - } - _m.ID = int(value.Int64) - case backupjobevent.FieldBackupJobID: - if value, ok := values[i].(*sql.NullInt64); !ok { - return fmt.Errorf("unexpected type %T for field backup_job_id", values[i]) - } else if value.Valid { - _m.BackupJobID = int(value.Int64) - } - case backupjobevent.FieldLevel: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field level", values[i]) - } else if value.Valid { - _m.Level = backupjobevent.Level(value.String) - } - case backupjobevent.FieldEventType: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field event_type", values[i]) - } else if value.Valid { - _m.EventType = value.String - } - case backupjobevent.FieldMessage: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field message", values[i]) - } 
else if value.Valid { - _m.Message = value.String - } - case backupjobevent.FieldPayload: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field payload", values[i]) - } else if value.Valid { - _m.Payload = value.String - } - case backupjobevent.FieldEventTime: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field event_time", values[i]) - } else if value.Valid { - _m.EventTime = value.Time - } - case backupjobevent.FieldCreatedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field created_at", values[i]) - } else if value.Valid { - _m.CreatedAt = value.Time - } - default: - _m.selectValues.Set(columns[i], values[i]) - } - } - return nil -} - -// Value returns the ent.Value that was dynamically selected and assigned to the BackupJobEvent. -// This includes values selected through modifiers, order, etc. -func (_m *BackupJobEvent) Value(name string) (ent.Value, error) { - return _m.selectValues.Get(name) -} - -// QueryJob queries the "job" edge of the BackupJobEvent entity. -func (_m *BackupJobEvent) QueryJob() *BackupJobQuery { - return NewBackupJobEventClient(_m.config).QueryJob(_m) -} - -// Update returns a builder for updating this BackupJobEvent. -// Note that you need to call BackupJobEvent.Unwrap() before calling this method if this BackupJobEvent -// was returned from a transaction, and the transaction was committed or rolled back. -func (_m *BackupJobEvent) Update() *BackupJobEventUpdateOne { - return NewBackupJobEventClient(_m.config).UpdateOne(_m) -} - -// Unwrap unwraps the BackupJobEvent entity that was returned from a transaction after it was closed, -// so that all future queries will be executed through the driver which created the transaction. 
-func (_m *BackupJobEvent) Unwrap() *BackupJobEvent { - _tx, ok := _m.config.driver.(*txDriver) - if !ok { - panic("ent: BackupJobEvent is not a transactional entity") - } - _m.config.driver = _tx.drv - return _m -} - -// String implements the fmt.Stringer. -func (_m *BackupJobEvent) String() string { - var builder strings.Builder - builder.WriteString("BackupJobEvent(") - builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) - builder.WriteString("backup_job_id=") - builder.WriteString(fmt.Sprintf("%v", _m.BackupJobID)) - builder.WriteString(", ") - builder.WriteString("level=") - builder.WriteString(fmt.Sprintf("%v", _m.Level)) - builder.WriteString(", ") - builder.WriteString("event_type=") - builder.WriteString(_m.EventType) - builder.WriteString(", ") - builder.WriteString("message=") - builder.WriteString(_m.Message) - builder.WriteString(", ") - builder.WriteString("payload=") - builder.WriteString(_m.Payload) - builder.WriteString(", ") - builder.WriteString("event_time=") - builder.WriteString(_m.EventTime.Format(time.ANSIC)) - builder.WriteString(", ") - builder.WriteString("created_at=") - builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) - builder.WriteByte(')') - return builder.String() -} - -// BackupJobEvents is a parsable slice of BackupJobEvent. -type BackupJobEvents []*BackupJobEvent diff --git a/backup/ent/backupjobevent/backupjobevent.go b/backup/ent/backupjobevent/backupjobevent.go deleted file mode 100644 index 7ea2f02e8..000000000 --- a/backup/ent/backupjobevent/backupjobevent.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package backupjobevent - -import ( - "fmt" - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" -) - -const ( - // Label holds the string label denoting the backupjobevent type in the database. - Label = "backup_job_event" - // FieldID holds the string denoting the id field in the database. 
- FieldID = "id" - // FieldBackupJobID holds the string denoting the backup_job_id field in the database. - FieldBackupJobID = "backup_job_id" - // FieldLevel holds the string denoting the level field in the database. - FieldLevel = "level" - // FieldEventType holds the string denoting the event_type field in the database. - FieldEventType = "event_type" - // FieldMessage holds the string denoting the message field in the database. - FieldMessage = "message" - // FieldPayload holds the string denoting the payload field in the database. - FieldPayload = "payload" - // FieldEventTime holds the string denoting the event_time field in the database. - FieldEventTime = "event_time" - // FieldCreatedAt holds the string denoting the created_at field in the database. - FieldCreatedAt = "created_at" - // EdgeJob holds the string denoting the job edge name in mutations. - EdgeJob = "job" - // Table holds the table name of the backupjobevent in the database. - Table = "backup_job_events" - // JobTable is the table that holds the job relation/edge. - JobTable = "backup_job_events" - // JobInverseTable is the table name for the BackupJob entity. - // It exists in this package in order to avoid circular dependency with the "backupjob" package. - JobInverseTable = "backup_jobs" - // JobColumn is the table column denoting the job relation/edge. - JobColumn = "backup_job_id" -) - -// Columns holds all SQL columns for backupjobevent fields. -var Columns = []string{ - FieldID, - FieldBackupJobID, - FieldLevel, - FieldEventType, - FieldMessage, - FieldPayload, - FieldEventTime, - FieldCreatedAt, -} - -// ValidColumn reports if the column name is valid (part of the table columns). -func ValidColumn(column string) bool { - for i := range Columns { - if column == Columns[i] { - return true - } - } - return false -} - -var ( - // DefaultEventType holds the default value on creation for the "event_type" field. 
- DefaultEventType string - // DefaultEventTime holds the default value on creation for the "event_time" field. - DefaultEventTime func() time.Time - // DefaultCreatedAt holds the default value on creation for the "created_at" field. - DefaultCreatedAt func() time.Time -) - -// Level defines the type for the "level" enum field. -type Level string - -// LevelInfo is the default value of the Level enum. -const DefaultLevel = LevelInfo - -// Level values. -const ( - LevelInfo Level = "info" - LevelWarning Level = "warning" - LevelError Level = "error" -) - -func (l Level) String() string { - return string(l) -} - -// LevelValidator is a validator for the "level" field enum values. It is called by the builders before save. -func LevelValidator(l Level) error { - switch l { - case LevelInfo, LevelWarning, LevelError: - return nil - default: - return fmt.Errorf("backupjobevent: invalid enum value for level field: %q", l) - } -} - -// OrderOption defines the ordering options for the BackupJobEvent queries. -type OrderOption func(*sql.Selector) - -// ByID orders the results by the id field. -func ByID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldID, opts...).ToFunc() -} - -// ByBackupJobID orders the results by the backup_job_id field. -func ByBackupJobID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldBackupJobID, opts...).ToFunc() -} - -// ByLevel orders the results by the level field. -func ByLevel(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldLevel, opts...).ToFunc() -} - -// ByEventType orders the results by the event_type field. -func ByEventType(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldEventType, opts...).ToFunc() -} - -// ByMessage orders the results by the message field. -func ByMessage(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldMessage, opts...).ToFunc() -} - -// ByPayload orders the results by the payload field. 
-func ByPayload(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldPayload, opts...).ToFunc() -} - -// ByEventTime orders the results by the event_time field. -func ByEventTime(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldEventTime, opts...).ToFunc() -} - -// ByCreatedAt orders the results by the created_at field. -func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() -} - -// ByJobField orders the results by job field. -func ByJobField(field string, opts ...sql.OrderTermOption) OrderOption { - return func(s *sql.Selector) { - sqlgraph.OrderByNeighborTerms(s, newJobStep(), sql.OrderByField(field, opts...)) - } -} -func newJobStep() *sqlgraph.Step { - return sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.To(JobInverseTable, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, JobTable, JobColumn), - ) -} diff --git a/backup/ent/backupjobevent/where.go b/backup/ent/backupjobevent/where.go deleted file mode 100644 index 756e67841..000000000 --- a/backup/ent/backupjobevent/where.go +++ /dev/null @@ -1,449 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package backupjobevent - -import ( - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// ID filters vertices based on their ID field. -func ID(id int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldID, id)) -} - -// IDEQ applies the EQ predicate on the ID field. -func IDEQ(id int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldID, id)) -} - -// IDNEQ applies the NEQ predicate on the ID field. -func IDNEQ(id int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNEQ(FieldID, id)) -} - -// IDIn applies the In predicate on the ID field. 
-func IDIn(ids ...int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldIn(FieldID, ids...)) -} - -// IDNotIn applies the NotIn predicate on the ID field. -func IDNotIn(ids ...int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNotIn(FieldID, ids...)) -} - -// IDGT applies the GT predicate on the ID field. -func IDGT(id int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGT(FieldID, id)) -} - -// IDGTE applies the GTE predicate on the ID field. -func IDGTE(id int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGTE(FieldID, id)) -} - -// IDLT applies the LT predicate on the ID field. -func IDLT(id int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLT(FieldID, id)) -} - -// IDLTE applies the LTE predicate on the ID field. -func IDLTE(id int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLTE(FieldID, id)) -} - -// BackupJobID applies equality check predicate on the "backup_job_id" field. It's identical to BackupJobIDEQ. -func BackupJobID(v int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldBackupJobID, v)) -} - -// EventType applies equality check predicate on the "event_type" field. It's identical to EventTypeEQ. -func EventType(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldEventType, v)) -} - -// Message applies equality check predicate on the "message" field. It's identical to MessageEQ. -func Message(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldMessage, v)) -} - -// Payload applies equality check predicate on the "payload" field. It's identical to PayloadEQ. -func Payload(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldPayload, v)) -} - -// EventTime applies equality check predicate on the "event_time" field. It's identical to EventTimeEQ. 
-func EventTime(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldEventTime, v)) -} - -// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. -func CreatedAt(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldCreatedAt, v)) -} - -// BackupJobIDEQ applies the EQ predicate on the "backup_job_id" field. -func BackupJobIDEQ(v int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldBackupJobID, v)) -} - -// BackupJobIDNEQ applies the NEQ predicate on the "backup_job_id" field. -func BackupJobIDNEQ(v int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNEQ(FieldBackupJobID, v)) -} - -// BackupJobIDIn applies the In predicate on the "backup_job_id" field. -func BackupJobIDIn(vs ...int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldIn(FieldBackupJobID, vs...)) -} - -// BackupJobIDNotIn applies the NotIn predicate on the "backup_job_id" field. -func BackupJobIDNotIn(vs ...int) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNotIn(FieldBackupJobID, vs...)) -} - -// LevelEQ applies the EQ predicate on the "level" field. -func LevelEQ(v Level) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldLevel, v)) -} - -// LevelNEQ applies the NEQ predicate on the "level" field. -func LevelNEQ(v Level) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNEQ(FieldLevel, v)) -} - -// LevelIn applies the In predicate on the "level" field. -func LevelIn(vs ...Level) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldIn(FieldLevel, vs...)) -} - -// LevelNotIn applies the NotIn predicate on the "level" field. -func LevelNotIn(vs ...Level) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNotIn(FieldLevel, vs...)) -} - -// EventTypeEQ applies the EQ predicate on the "event_type" field. 
-func EventTypeEQ(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldEventType, v)) -} - -// EventTypeNEQ applies the NEQ predicate on the "event_type" field. -func EventTypeNEQ(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNEQ(FieldEventType, v)) -} - -// EventTypeIn applies the In predicate on the "event_type" field. -func EventTypeIn(vs ...string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldIn(FieldEventType, vs...)) -} - -// EventTypeNotIn applies the NotIn predicate on the "event_type" field. -func EventTypeNotIn(vs ...string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNotIn(FieldEventType, vs...)) -} - -// EventTypeGT applies the GT predicate on the "event_type" field. -func EventTypeGT(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGT(FieldEventType, v)) -} - -// EventTypeGTE applies the GTE predicate on the "event_type" field. -func EventTypeGTE(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGTE(FieldEventType, v)) -} - -// EventTypeLT applies the LT predicate on the "event_type" field. -func EventTypeLT(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLT(FieldEventType, v)) -} - -// EventTypeLTE applies the LTE predicate on the "event_type" field. -func EventTypeLTE(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLTE(FieldEventType, v)) -} - -// EventTypeContains applies the Contains predicate on the "event_type" field. -func EventTypeContains(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldContains(FieldEventType, v)) -} - -// EventTypeHasPrefix applies the HasPrefix predicate on the "event_type" field. 
-func EventTypeHasPrefix(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldHasPrefix(FieldEventType, v)) -} - -// EventTypeHasSuffix applies the HasSuffix predicate on the "event_type" field. -func EventTypeHasSuffix(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldHasSuffix(FieldEventType, v)) -} - -// EventTypeEqualFold applies the EqualFold predicate on the "event_type" field. -func EventTypeEqualFold(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEqualFold(FieldEventType, v)) -} - -// EventTypeContainsFold applies the ContainsFold predicate on the "event_type" field. -func EventTypeContainsFold(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldContainsFold(FieldEventType, v)) -} - -// MessageEQ applies the EQ predicate on the "message" field. -func MessageEQ(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldMessage, v)) -} - -// MessageNEQ applies the NEQ predicate on the "message" field. -func MessageNEQ(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNEQ(FieldMessage, v)) -} - -// MessageIn applies the In predicate on the "message" field. -func MessageIn(vs ...string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldIn(FieldMessage, vs...)) -} - -// MessageNotIn applies the NotIn predicate on the "message" field. -func MessageNotIn(vs ...string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNotIn(FieldMessage, vs...)) -} - -// MessageGT applies the GT predicate on the "message" field. -func MessageGT(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGT(FieldMessage, v)) -} - -// MessageGTE applies the GTE predicate on the "message" field. 
-func MessageGTE(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGTE(FieldMessage, v)) -} - -// MessageLT applies the LT predicate on the "message" field. -func MessageLT(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLT(FieldMessage, v)) -} - -// MessageLTE applies the LTE predicate on the "message" field. -func MessageLTE(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLTE(FieldMessage, v)) -} - -// MessageContains applies the Contains predicate on the "message" field. -func MessageContains(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldContains(FieldMessage, v)) -} - -// MessageHasPrefix applies the HasPrefix predicate on the "message" field. -func MessageHasPrefix(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldHasPrefix(FieldMessage, v)) -} - -// MessageHasSuffix applies the HasSuffix predicate on the "message" field. -func MessageHasSuffix(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldHasSuffix(FieldMessage, v)) -} - -// MessageEqualFold applies the EqualFold predicate on the "message" field. -func MessageEqualFold(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEqualFold(FieldMessage, v)) -} - -// MessageContainsFold applies the ContainsFold predicate on the "message" field. -func MessageContainsFold(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldContainsFold(FieldMessage, v)) -} - -// PayloadEQ applies the EQ predicate on the "payload" field. -func PayloadEQ(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldPayload, v)) -} - -// PayloadNEQ applies the NEQ predicate on the "payload" field. 
-func PayloadNEQ(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNEQ(FieldPayload, v)) -} - -// PayloadIn applies the In predicate on the "payload" field. -func PayloadIn(vs ...string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldIn(FieldPayload, vs...)) -} - -// PayloadNotIn applies the NotIn predicate on the "payload" field. -func PayloadNotIn(vs ...string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNotIn(FieldPayload, vs...)) -} - -// PayloadGT applies the GT predicate on the "payload" field. -func PayloadGT(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGT(FieldPayload, v)) -} - -// PayloadGTE applies the GTE predicate on the "payload" field. -func PayloadGTE(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGTE(FieldPayload, v)) -} - -// PayloadLT applies the LT predicate on the "payload" field. -func PayloadLT(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLT(FieldPayload, v)) -} - -// PayloadLTE applies the LTE predicate on the "payload" field. -func PayloadLTE(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLTE(FieldPayload, v)) -} - -// PayloadContains applies the Contains predicate on the "payload" field. -func PayloadContains(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldContains(FieldPayload, v)) -} - -// PayloadHasPrefix applies the HasPrefix predicate on the "payload" field. -func PayloadHasPrefix(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldHasPrefix(FieldPayload, v)) -} - -// PayloadHasSuffix applies the HasSuffix predicate on the "payload" field. -func PayloadHasSuffix(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldHasSuffix(FieldPayload, v)) -} - -// PayloadIsNil applies the IsNil predicate on the "payload" field. 
-func PayloadIsNil() predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldIsNull(FieldPayload)) -} - -// PayloadNotNil applies the NotNil predicate on the "payload" field. -func PayloadNotNil() predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNotNull(FieldPayload)) -} - -// PayloadEqualFold applies the EqualFold predicate on the "payload" field. -func PayloadEqualFold(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEqualFold(FieldPayload, v)) -} - -// PayloadContainsFold applies the ContainsFold predicate on the "payload" field. -func PayloadContainsFold(v string) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldContainsFold(FieldPayload, v)) -} - -// EventTimeEQ applies the EQ predicate on the "event_time" field. -func EventTimeEQ(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldEventTime, v)) -} - -// EventTimeNEQ applies the NEQ predicate on the "event_time" field. -func EventTimeNEQ(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNEQ(FieldEventTime, v)) -} - -// EventTimeIn applies the In predicate on the "event_time" field. -func EventTimeIn(vs ...time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldIn(FieldEventTime, vs...)) -} - -// EventTimeNotIn applies the NotIn predicate on the "event_time" field. -func EventTimeNotIn(vs ...time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNotIn(FieldEventTime, vs...)) -} - -// EventTimeGT applies the GT predicate on the "event_time" field. -func EventTimeGT(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGT(FieldEventTime, v)) -} - -// EventTimeGTE applies the GTE predicate on the "event_time" field. 
-func EventTimeGTE(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGTE(FieldEventTime, v)) -} - -// EventTimeLT applies the LT predicate on the "event_time" field. -func EventTimeLT(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLT(FieldEventTime, v)) -} - -// EventTimeLTE applies the LTE predicate on the "event_time" field. -func EventTimeLTE(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLTE(FieldEventTime, v)) -} - -// CreatedAtEQ applies the EQ predicate on the "created_at" field. -func CreatedAtEQ(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldEQ(FieldCreatedAt, v)) -} - -// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. -func CreatedAtNEQ(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNEQ(FieldCreatedAt, v)) -} - -// CreatedAtIn applies the In predicate on the "created_at" field. -func CreatedAtIn(vs ...time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldIn(FieldCreatedAt, vs...)) -} - -// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. -func CreatedAtNotIn(vs ...time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldNotIn(FieldCreatedAt, vs...)) -} - -// CreatedAtGT applies the GT predicate on the "created_at" field. -func CreatedAtGT(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGT(FieldCreatedAt, v)) -} - -// CreatedAtGTE applies the GTE predicate on the "created_at" field. -func CreatedAtGTE(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldGTE(FieldCreatedAt, v)) -} - -// CreatedAtLT applies the LT predicate on the "created_at" field. 
-func CreatedAtLT(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLT(FieldCreatedAt, v)) -} - -// CreatedAtLTE applies the LTE predicate on the "created_at" field. -func CreatedAtLTE(v time.Time) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.FieldLTE(FieldCreatedAt, v)) -} - -// HasJob applies the HasEdge predicate on the "job" edge. -func HasJob() predicate.BackupJobEvent { - return predicate.BackupJobEvent(func(s *sql.Selector) { - step := sqlgraph.NewStep( - sqlgraph.From(Table, FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, JobTable, JobColumn), - ) - sqlgraph.HasNeighbors(s, step) - }) -} - -// HasJobWith applies the HasEdge predicate on the "job" edge with a given conditions (other predicates). -func HasJobWith(preds ...predicate.BackupJob) predicate.BackupJobEvent { - return predicate.BackupJobEvent(func(s *sql.Selector) { - step := newJobStep() - sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { - for _, p := range preds { - p(s) - } - }) - }) -} - -// And groups predicates with the AND operator between them. -func And(predicates ...predicate.BackupJobEvent) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.AndPredicates(predicates...)) -} - -// Or groups predicates with the OR operator between them. -func Or(predicates ...predicate.BackupJobEvent) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.OrPredicates(predicates...)) -} - -// Not applies the not operator on the given predicate. -func Not(p predicate.BackupJobEvent) predicate.BackupJobEvent { - return predicate.BackupJobEvent(sql.NotPredicates(p)) -} diff --git a/backup/ent/backupjobevent_create.go b/backup/ent/backupjobevent_create.go deleted file mode 100644 index 6fc8c64f0..000000000 --- a/backup/ent/backupjobevent_create.go +++ /dev/null @@ -1,354 +0,0 @@ -// Code generated by ent, DO NOT EDIT. 
- -package ent - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" -) - -// BackupJobEventCreate is the builder for creating a BackupJobEvent entity. -type BackupJobEventCreate struct { - config - mutation *BackupJobEventMutation - hooks []Hook -} - -// SetBackupJobID sets the "backup_job_id" field. -func (_c *BackupJobEventCreate) SetBackupJobID(v int) *BackupJobEventCreate { - _c.mutation.SetBackupJobID(v) - return _c -} - -// SetLevel sets the "level" field. -func (_c *BackupJobEventCreate) SetLevel(v backupjobevent.Level) *BackupJobEventCreate { - _c.mutation.SetLevel(v) - return _c -} - -// SetNillableLevel sets the "level" field if the given value is not nil. -func (_c *BackupJobEventCreate) SetNillableLevel(v *backupjobevent.Level) *BackupJobEventCreate { - if v != nil { - _c.SetLevel(*v) - } - return _c -} - -// SetEventType sets the "event_type" field. -func (_c *BackupJobEventCreate) SetEventType(v string) *BackupJobEventCreate { - _c.mutation.SetEventType(v) - return _c -} - -// SetNillableEventType sets the "event_type" field if the given value is not nil. -func (_c *BackupJobEventCreate) SetNillableEventType(v *string) *BackupJobEventCreate { - if v != nil { - _c.SetEventType(*v) - } - return _c -} - -// SetMessage sets the "message" field. -func (_c *BackupJobEventCreate) SetMessage(v string) *BackupJobEventCreate { - _c.mutation.SetMessage(v) - return _c -} - -// SetPayload sets the "payload" field. -func (_c *BackupJobEventCreate) SetPayload(v string) *BackupJobEventCreate { - _c.mutation.SetPayload(v) - return _c -} - -// SetNillablePayload sets the "payload" field if the given value is not nil. 
-func (_c *BackupJobEventCreate) SetNillablePayload(v *string) *BackupJobEventCreate { - if v != nil { - _c.SetPayload(*v) - } - return _c -} - -// SetEventTime sets the "event_time" field. -func (_c *BackupJobEventCreate) SetEventTime(v time.Time) *BackupJobEventCreate { - _c.mutation.SetEventTime(v) - return _c -} - -// SetNillableEventTime sets the "event_time" field if the given value is not nil. -func (_c *BackupJobEventCreate) SetNillableEventTime(v *time.Time) *BackupJobEventCreate { - if v != nil { - _c.SetEventTime(*v) - } - return _c -} - -// SetCreatedAt sets the "created_at" field. -func (_c *BackupJobEventCreate) SetCreatedAt(v time.Time) *BackupJobEventCreate { - _c.mutation.SetCreatedAt(v) - return _c -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (_c *BackupJobEventCreate) SetNillableCreatedAt(v *time.Time) *BackupJobEventCreate { - if v != nil { - _c.SetCreatedAt(*v) - } - return _c -} - -// SetJobID sets the "job" edge to the BackupJob entity by ID. -func (_c *BackupJobEventCreate) SetJobID(id int) *BackupJobEventCreate { - _c.mutation.SetJobID(id) - return _c -} - -// SetJob sets the "job" edge to the BackupJob entity. -func (_c *BackupJobEventCreate) SetJob(v *BackupJob) *BackupJobEventCreate { - return _c.SetJobID(v.ID) -} - -// Mutation returns the BackupJobEventMutation object of the builder. -func (_c *BackupJobEventCreate) Mutation() *BackupJobEventMutation { - return _c.mutation -} - -// Save creates the BackupJobEvent in the database. -func (_c *BackupJobEventCreate) Save(ctx context.Context) (*BackupJobEvent, error) { - _c.defaults() - return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) -} - -// SaveX calls Save and panics if Save returns an error. -func (_c *BackupJobEventCreate) SaveX(ctx context.Context) *BackupJobEvent { - v, err := _c.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. 
-func (_c *BackupJobEventCreate) Exec(ctx context.Context) error { - _, err := _c.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_c *BackupJobEventCreate) ExecX(ctx context.Context) { - if err := _c.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_c *BackupJobEventCreate) defaults() { - if _, ok := _c.mutation.Level(); !ok { - v := backupjobevent.DefaultLevel - _c.mutation.SetLevel(v) - } - if _, ok := _c.mutation.EventType(); !ok { - v := backupjobevent.DefaultEventType - _c.mutation.SetEventType(v) - } - if _, ok := _c.mutation.EventTime(); !ok { - v := backupjobevent.DefaultEventTime() - _c.mutation.SetEventTime(v) - } - if _, ok := _c.mutation.CreatedAt(); !ok { - v := backupjobevent.DefaultCreatedAt() - _c.mutation.SetCreatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. -func (_c *BackupJobEventCreate) check() error { - if _, ok := _c.mutation.BackupJobID(); !ok { - return &ValidationError{Name: "backup_job_id", err: errors.New(`ent: missing required field "BackupJobEvent.backup_job_id"`)} - } - if _, ok := _c.mutation.Level(); !ok { - return &ValidationError{Name: "level", err: errors.New(`ent: missing required field "BackupJobEvent.level"`)} - } - if v, ok := _c.mutation.Level(); ok { - if err := backupjobevent.LevelValidator(v); err != nil { - return &ValidationError{Name: "level", err: fmt.Errorf(`ent: validator failed for field "BackupJobEvent.level": %w`, err)} - } - } - if _, ok := _c.mutation.EventType(); !ok { - return &ValidationError{Name: "event_type", err: errors.New(`ent: missing required field "BackupJobEvent.event_type"`)} - } - if _, ok := _c.mutation.Message(); !ok { - return &ValidationError{Name: "message", err: errors.New(`ent: missing required field "BackupJobEvent.message"`)} - } - if _, ok := _c.mutation.EventTime(); !ok { - return &ValidationError{Name: "event_time", err: 
errors.New(`ent: missing required field "BackupJobEvent.event_time"`)} - } - if _, ok := _c.mutation.CreatedAt(); !ok { - return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "BackupJobEvent.created_at"`)} - } - if len(_c.mutation.JobIDs()) == 0 { - return &ValidationError{Name: "job", err: errors.New(`ent: missing required edge "BackupJobEvent.job"`)} - } - return nil -} - -func (_c *BackupJobEventCreate) sqlSave(ctx context.Context) (*BackupJobEvent, error) { - if err := _c.check(); err != nil { - return nil, err - } - _node, _spec := _c.createSpec() - if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - id := _spec.ID.Value.(int64) - _node.ID = int(id) - _c.mutation.id = &_node.ID - _c.mutation.done = true - return _node, nil -} - -func (_c *BackupJobEventCreate) createSpec() (*BackupJobEvent, *sqlgraph.CreateSpec) { - var ( - _node = &BackupJobEvent{config: _c.config} - _spec = sqlgraph.NewCreateSpec(backupjobevent.Table, sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt)) - ) - if value, ok := _c.mutation.Level(); ok { - _spec.SetField(backupjobevent.FieldLevel, field.TypeEnum, value) - _node.Level = value - } - if value, ok := _c.mutation.EventType(); ok { - _spec.SetField(backupjobevent.FieldEventType, field.TypeString, value) - _node.EventType = value - } - if value, ok := _c.mutation.Message(); ok { - _spec.SetField(backupjobevent.FieldMessage, field.TypeString, value) - _node.Message = value - } - if value, ok := _c.mutation.Payload(); ok { - _spec.SetField(backupjobevent.FieldPayload, field.TypeString, value) - _node.Payload = value - } - if value, ok := _c.mutation.EventTime(); ok { - _spec.SetField(backupjobevent.FieldEventTime, field.TypeTime, value) - _node.EventTime = value - } - if value, ok := _c.mutation.CreatedAt(); ok { - _spec.SetField(backupjobevent.FieldCreatedAt, 
field.TypeTime, value) - _node.CreatedAt = value - } - if nodes := _c.mutation.JobIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: false, - Table: backupjobevent.JobTable, - Columns: []string{backupjobevent.JobColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _node.BackupJobID = nodes[0] - _spec.Edges = append(_spec.Edges, edge) - } - return _node, _spec -} - -// BackupJobEventCreateBulk is the builder for creating many BackupJobEvent entities in bulk. -type BackupJobEventCreateBulk struct { - config - err error - builders []*BackupJobEventCreate -} - -// Save creates the BackupJobEvent entities in the database. -func (_c *BackupJobEventCreateBulk) Save(ctx context.Context) ([]*BackupJobEvent, error) { - if _c.err != nil { - return nil, _c.err - } - specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) - nodes := make([]*BackupJobEvent, len(_c.builders)) - mutators := make([]Mutator, len(_c.builders)) - for i := range _c.builders { - func(i int, root context.Context) { - builder := _c.builders[i] - builder.defaults() - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BackupJobEventMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err := builder.check(); err != nil { - return nil, err - } - builder.mutation = mutation - var err error - nodes[i], specs[i] = builder.createSpec() - if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) - } else { - spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - } - } - if err != nil { - return nil, err - } - mutation.id = &nodes[i].ID - if specs[i].ID.Value != nil { - id := specs[i].ID.Value.(int64) - nodes[i].ID = int(id) - } - mutation.done = true - return nodes[i], nil - }) - for i := len(builder.hooks) - 1; i >= 0; i-- { - mut = builder.hooks[i](mut) - } - mutators[i] = mut - }(i, ctx) - } - if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { - return nil, err - } - } - return nodes, nil -} - -// SaveX is like Save, but panics if an error occurs. -func (_c *BackupJobEventCreateBulk) SaveX(ctx context.Context) []*BackupJobEvent { - v, err := _c.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. -func (_c *BackupJobEventCreateBulk) Exec(ctx context.Context) error { - _, err := _c.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_c *BackupJobEventCreateBulk) ExecX(ctx context.Context) { - if err := _c.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/backup/ent/backupjobevent_delete.go b/backup/ent/backupjobevent_delete.go deleted file mode 100644 index 55c0ef9bd..000000000 --- a/backup/ent/backupjobevent_delete.go +++ /dev/null @@ -1,88 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupJobEventDelete is the builder for deleting a BackupJobEvent entity. -type BackupJobEventDelete struct { - config - hooks []Hook - mutation *BackupJobEventMutation -} - -// Where appends a list predicates to the BackupJobEventDelete builder. 
-func (_d *BackupJobEventDelete) Where(ps ...predicate.BackupJobEvent) *BackupJobEventDelete { - _d.mutation.Where(ps...) - return _d -} - -// Exec executes the deletion query and returns how many vertices were deleted. -func (_d *BackupJobEventDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) -} - -// ExecX is like Exec, but panics if an error occurs. -func (_d *BackupJobEventDelete) ExecX(ctx context.Context) int { - n, err := _d.Exec(ctx) - if err != nil { - panic(err) - } - return n -} - -func (_d *BackupJobEventDelete) sqlExec(ctx context.Context) (int, error) { - _spec := sqlgraph.NewDeleteSpec(backupjobevent.Table, sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt)) - if ps := _d.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) - if err != nil && sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - _d.mutation.done = true - return affected, err -} - -// BackupJobEventDeleteOne is the builder for deleting a single BackupJobEvent entity. -type BackupJobEventDeleteOne struct { - _d *BackupJobEventDelete -} - -// Where appends a list predicates to the BackupJobEventDelete builder. -func (_d *BackupJobEventDeleteOne) Where(ps ...predicate.BackupJobEvent) *BackupJobEventDeleteOne { - _d._d.mutation.Where(ps...) - return _d -} - -// Exec executes the deletion query. -func (_d *BackupJobEventDeleteOne) Exec(ctx context.Context) error { - n, err := _d._d.Exec(ctx) - switch { - case err != nil: - return err - case n == 0: - return &NotFoundError{backupjobevent.Label} - default: - return nil - } -} - -// ExecX is like Exec, but panics if an error occurs. 
-func (_d *BackupJobEventDeleteOne) ExecX(ctx context.Context) { - if err := _d.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/backup/ent/backupjobevent_query.go b/backup/ent/backupjobevent_query.go deleted file mode 100644 index 6f4b512ff..000000000 --- a/backup/ent/backupjobevent_query.go +++ /dev/null @@ -1,606 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "fmt" - "math" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupJobEventQuery is the builder for querying BackupJobEvent entities. -type BackupJobEventQuery struct { - config - ctx *QueryContext - order []backupjobevent.OrderOption - inters []Interceptor - predicates []predicate.BackupJobEvent - withJob *BackupJobQuery - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Where adds a new predicate for the BackupJobEventQuery builder. -func (_q *BackupJobEventQuery) Where(ps ...predicate.BackupJobEvent) *BackupJobEventQuery { - _q.predicates = append(_q.predicates, ps...) - return _q -} - -// Limit the number of records to be returned by this query. -func (_q *BackupJobEventQuery) Limit(limit int) *BackupJobEventQuery { - _q.ctx.Limit = &limit - return _q -} - -// Offset to start from. -func (_q *BackupJobEventQuery) Offset(offset int) *BackupJobEventQuery { - _q.ctx.Offset = &offset - return _q -} - -// Unique configures the query builder to filter duplicate records on query. -// By default, unique is set to true, and can be disabled using this method. -func (_q *BackupJobEventQuery) Unique(unique bool) *BackupJobEventQuery { - _q.ctx.Unique = &unique - return _q -} - -// Order specifies how the records should be ordered. 
-func (_q *BackupJobEventQuery) Order(o ...backupjobevent.OrderOption) *BackupJobEventQuery { - _q.order = append(_q.order, o...) - return _q -} - -// QueryJob chains the current query on the "job" edge. -func (_q *BackupJobEventQuery) QueryJob() *BackupJobQuery { - query := (&BackupJobClient{config: _q.config}).Query() - query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { - if err := _q.prepareQuery(ctx); err != nil { - return nil, err - } - selector := _q.sqlQuery(ctx) - if err := selector.Err(); err != nil { - return nil, err - } - step := sqlgraph.NewStep( - sqlgraph.From(backupjobevent.Table, backupjobevent.FieldID, selector), - sqlgraph.To(backupjob.Table, backupjob.FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, backupjobevent.JobTable, backupjobevent.JobColumn), - ) - fromU = sqlgraph.SetNeighbors(_q.driver.Dialect(), step) - return fromU, nil - } - return query -} - -// First returns the first BackupJobEvent entity from the query. -// Returns a *NotFoundError when no BackupJobEvent was found. -func (_q *BackupJobEventQuery) First(ctx context.Context) (*BackupJobEvent, error) { - nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) - if err != nil { - return nil, err - } - if len(nodes) == 0 { - return nil, &NotFoundError{backupjobevent.Label} - } - return nodes[0], nil -} - -// FirstX is like First, but panics if an error occurs. -func (_q *BackupJobEventQuery) FirstX(ctx context.Context) *BackupJobEvent { - node, err := _q.First(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return node -} - -// FirstID returns the first BackupJobEvent ID from the query. -// Returns a *NotFoundError when no BackupJobEvent ID was found. 
-func (_q *BackupJobEventQuery) FirstID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { - return - } - if len(ids) == 0 { - err = &NotFoundError{backupjobevent.Label} - return - } - return ids[0], nil -} - -// FirstIDX is like FirstID, but panics if an error occurs. -func (_q *BackupJobEventQuery) FirstIDX(ctx context.Context) int { - id, err := _q.FirstID(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return id -} - -// Only returns a single BackupJobEvent entity found by the query, ensuring it only returns one. -// Returns a *NotSingularError when more than one BackupJobEvent entity is found. -// Returns a *NotFoundError when no BackupJobEvent entities are found. -func (_q *BackupJobEventQuery) Only(ctx context.Context) (*BackupJobEvent, error) { - nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) - if err != nil { - return nil, err - } - switch len(nodes) { - case 1: - return nodes[0], nil - case 0: - return nil, &NotFoundError{backupjobevent.Label} - default: - return nil, &NotSingularError{backupjobevent.Label} - } -} - -// OnlyX is like Only, but panics if an error occurs. -func (_q *BackupJobEventQuery) OnlyX(ctx context.Context) *BackupJobEvent { - node, err := _q.Only(ctx) - if err != nil { - panic(err) - } - return node -} - -// OnlyID is like Only, but returns the only BackupJobEvent ID in the query. -// Returns a *NotSingularError when more than one BackupJobEvent ID is found. -// Returns a *NotFoundError when no entities are found. 
-func (_q *BackupJobEventQuery) OnlyID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { - return - } - switch len(ids) { - case 1: - id = ids[0] - case 0: - err = &NotFoundError{backupjobevent.Label} - default: - err = &NotSingularError{backupjobevent.Label} - } - return -} - -// OnlyIDX is like OnlyID, but panics if an error occurs. -func (_q *BackupJobEventQuery) OnlyIDX(ctx context.Context) int { - id, err := _q.OnlyID(ctx) - if err != nil { - panic(err) - } - return id -} - -// All executes the query and returns a list of BackupJobEvents. -func (_q *BackupJobEventQuery) All(ctx context.Context) ([]*BackupJobEvent, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) - if err := _q.prepareQuery(ctx); err != nil { - return nil, err - } - qr := querierAll[[]*BackupJobEvent, *BackupJobEventQuery]() - return withInterceptors[[]*BackupJobEvent](ctx, _q, qr, _q.inters) -} - -// AllX is like All, but panics if an error occurs. -func (_q *BackupJobEventQuery) AllX(ctx context.Context) []*BackupJobEvent { - nodes, err := _q.All(ctx) - if err != nil { - panic(err) - } - return nodes -} - -// IDs executes the query and returns a list of BackupJobEvent IDs. -func (_q *BackupJobEventQuery) IDs(ctx context.Context) (ids []int, err error) { - if _q.ctx.Unique == nil && _q.path != nil { - _q.Unique(true) - } - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) - if err = _q.Select(backupjobevent.FieldID).Scan(ctx, &ids); err != nil { - return nil, err - } - return ids, nil -} - -// IDsX is like IDs, but panics if an error occurs. -func (_q *BackupJobEventQuery) IDsX(ctx context.Context) []int { - ids, err := _q.IDs(ctx) - if err != nil { - panic(err) - } - return ids -} - -// Count returns the count of the given query. 
-func (_q *BackupJobEventQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) - if err := _q.prepareQuery(ctx); err != nil { - return 0, err - } - return withInterceptors[int](ctx, _q, querierCount[*BackupJobEventQuery](), _q.inters) -} - -// CountX is like Count, but panics if an error occurs. -func (_q *BackupJobEventQuery) CountX(ctx context.Context) int { - count, err := _q.Count(ctx) - if err != nil { - panic(err) - } - return count -} - -// Exist returns true if the query has elements in the graph. -func (_q *BackupJobEventQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) - switch _, err := _q.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - -// ExistX is like Exist, but panics if an error occurs. -func (_q *BackupJobEventQuery) ExistX(ctx context.Context) bool { - exist, err := _q.Exist(ctx) - if err != nil { - panic(err) - } - return exist -} - -// Clone returns a duplicate of the BackupJobEventQuery builder, including all associated steps. It can be -// used to prepare common query builders and use them differently after the clone is made. -func (_q *BackupJobEventQuery) Clone() *BackupJobEventQuery { - if _q == nil { - return nil - } - return &BackupJobEventQuery{ - config: _q.config, - ctx: _q.ctx.Clone(), - order: append([]backupjobevent.OrderOption{}, _q.order...), - inters: append([]Interceptor{}, _q.inters...), - predicates: append([]predicate.BackupJobEvent{}, _q.predicates...), - withJob: _q.withJob.Clone(), - // clone intermediate query. - sql: _q.sql.Clone(), - path: _q.path, - } -} - -// WithJob tells the query-builder to eager-load the nodes that are connected to -// the "job" edge. The optional arguments are used to configure the query builder of the edge. 
-func (_q *BackupJobEventQuery) WithJob(opts ...func(*BackupJobQuery)) *BackupJobEventQuery { - query := (&BackupJobClient{config: _q.config}).Query() - for _, opt := range opts { - opt(query) - } - _q.withJob = query - return _q -} - -// GroupBy is used to group vertices by one or more fields/columns. -// It is often used with aggregate functions, like: count, max, mean, min, sum. -// -// Example: -// -// var v []struct { -// BackupJobID int `json:"backup_job_id,omitempty"` -// Count int `json:"count,omitempty"` -// } -// -// client.BackupJobEvent.Query(). -// GroupBy(backupjobevent.FieldBackupJobID). -// Aggregate(ent.Count()). -// Scan(ctx, &v) -func (_q *BackupJobEventQuery) GroupBy(field string, fields ...string) *BackupJobEventGroupBy { - _q.ctx.Fields = append([]string{field}, fields...) - grbuild := &BackupJobEventGroupBy{build: _q} - grbuild.flds = &_q.ctx.Fields - grbuild.label = backupjobevent.Label - grbuild.scan = grbuild.Scan - return grbuild -} - -// Select allows the selection one or more fields/columns for the given query, -// instead of selecting all fields in the entity. -// -// Example: -// -// var v []struct { -// BackupJobID int `json:"backup_job_id,omitempty"` -// } -// -// client.BackupJobEvent.Query(). -// Select(backupjobevent.FieldBackupJobID). -// Scan(ctx, &v) -func (_q *BackupJobEventQuery) Select(fields ...string) *BackupJobEventSelect { - _q.ctx.Fields = append(_q.ctx.Fields, fields...) - sbuild := &BackupJobEventSelect{BackupJobEventQuery: _q} - sbuild.label = backupjobevent.Label - sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan - return sbuild -} - -// Aggregate returns a BackupJobEventSelect configured with the given aggregations. -func (_q *BackupJobEventQuery) Aggregate(fns ...AggregateFunc) *BackupJobEventSelect { - return _q.Select().Aggregate(fns...) 
-} - -func (_q *BackupJobEventQuery) prepareQuery(ctx context.Context) error { - for _, inter := range _q.inters { - if inter == nil { - return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") - } - if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, _q); err != nil { - return err - } - } - } - for _, f := range _q.ctx.Fields { - if !backupjobevent.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} - } - } - if _q.path != nil { - prev, err := _q.path(ctx) - if err != nil { - return err - } - _q.sql = prev - } - return nil -} - -func (_q *BackupJobEventQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BackupJobEvent, error) { - var ( - nodes = []*BackupJobEvent{} - _spec = _q.querySpec() - loadedTypes = [1]bool{ - _q.withJob != nil, - } - ) - _spec.ScanValues = func(columns []string) ([]any, error) { - return (*BackupJobEvent).scanValues(nil, columns) - } - _spec.Assign = func(columns []string, values []any) error { - node := &BackupJobEvent{config: _q.config} - nodes = append(nodes, node) - node.Edges.loadedTypes = loadedTypes - return node.assignValues(columns, values) - } - for i := range hooks { - hooks[i](ctx, _spec) - } - if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { - return nil, err - } - if len(nodes) == 0 { - return nodes, nil - } - if query := _q.withJob; query != nil { - if err := _q.loadJob(ctx, query, nodes, nil, - func(n *BackupJobEvent, e *BackupJob) { n.Edges.Job = e }); err != nil { - return nil, err - } - } - return nodes, nil -} - -func (_q *BackupJobEventQuery) loadJob(ctx context.Context, query *BackupJobQuery, nodes []*BackupJobEvent, init func(*BackupJobEvent), assign func(*BackupJobEvent, *BackupJob)) error { - ids := make([]int, 0, len(nodes)) - nodeids := make(map[int][]*BackupJobEvent) - for i := range nodes { - fk := nodes[i].BackupJobID - if _, ok := nodeids[fk]; !ok { - ids = append(ids, fk) - } - 
nodeids[fk] = append(nodeids[fk], nodes[i]) - } - if len(ids) == 0 { - return nil - } - query.Where(backupjob.IDIn(ids...)) - neighbors, err := query.All(ctx) - if err != nil { - return err - } - for _, n := range neighbors { - nodes, ok := nodeids[n.ID] - if !ok { - return fmt.Errorf(`unexpected foreign-key "backup_job_id" returned %v`, n.ID) - } - for i := range nodes { - assign(nodes[i], n) - } - } - return nil -} - -func (_q *BackupJobEventQuery) sqlCount(ctx context.Context) (int, error) { - _spec := _q.querySpec() - _spec.Node.Columns = _q.ctx.Fields - if len(_q.ctx.Fields) > 0 { - _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique - } - return sqlgraph.CountNodes(ctx, _q.driver, _spec) -} - -func (_q *BackupJobEventQuery) querySpec() *sqlgraph.QuerySpec { - _spec := sqlgraph.NewQuerySpec(backupjobevent.Table, backupjobevent.Columns, sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt)) - _spec.From = _q.sql - if unique := _q.ctx.Unique; unique != nil { - _spec.Unique = *unique - } else if _q.path != nil { - _spec.Unique = true - } - if fields := _q.ctx.Fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, backupjobevent.FieldID) - for i := range fields { - if fields[i] != backupjobevent.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) - } - } - if _q.withJob != nil { - _spec.Node.AddColumnOnce(backupjobevent.FieldBackupJobID) - } - } - if ps := _q.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if limit := _q.ctx.Limit; limit != nil { - _spec.Limit = *limit - } - if offset := _q.ctx.Offset; offset != nil { - _spec.Offset = *offset - } - if ps := _q.order; len(ps) > 0 { - _spec.Order = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - return _spec -} - -func (_q *BackupJobEventQuery) sqlQuery(ctx context.Context) *sql.Selector { - 
builder := sql.Dialect(_q.driver.Dialect()) - t1 := builder.Table(backupjobevent.Table) - columns := _q.ctx.Fields - if len(columns) == 0 { - columns = backupjobevent.Columns - } - selector := builder.Select(t1.Columns(columns...)...).From(t1) - if _q.sql != nil { - selector = _q.sql - selector.Select(selector.Columns(columns...)...) - } - if _q.ctx.Unique != nil && *_q.ctx.Unique { - selector.Distinct() - } - for _, p := range _q.predicates { - p(selector) - } - for _, p := range _q.order { - p(selector) - } - if offset := _q.ctx.Offset; offset != nil { - // limit is mandatory for offset clause. We start - // with default value, and override it below if needed. - selector.Offset(*offset).Limit(math.MaxInt32) - } - if limit := _q.ctx.Limit; limit != nil { - selector.Limit(*limit) - } - return selector -} - -// BackupJobEventGroupBy is the group-by builder for BackupJobEvent entities. -type BackupJobEventGroupBy struct { - selector - build *BackupJobEventQuery -} - -// Aggregate adds the given aggregation functions to the group-by query. -func (_g *BackupJobEventGroupBy) Aggregate(fns ...AggregateFunc) *BackupJobEventGroupBy { - _g.fns = append(_g.fns, fns...) - return _g -} - -// Scan applies the selector query and scans the result into the given value. 
-func (_g *BackupJobEventGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) - if err := _g.build.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*BackupJobEventQuery, *BackupJobEventGroupBy](ctx, _g.build, _g, _g.build.inters, v) -} - -func (_g *BackupJobEventGroupBy) sqlScan(ctx context.Context, root *BackupJobEventQuery, v any) error { - selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(_g.fns)) - for _, fn := range _g.fns { - aggregation = append(aggregation, fn(selector)) - } - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) - for _, f := range *_g.flds { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - selector.GroupBy(selector.Columns(*_g.flds...)...) - if err := selector.Err(); err != nil { - return err - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} - -// BackupJobEventSelect is the builder for selecting fields of BackupJobEvent entities. -type BackupJobEventSelect struct { - *BackupJobEventQuery - selector -} - -// Aggregate adds the given aggregation functions to the selector query. -func (_s *BackupJobEventSelect) Aggregate(fns ...AggregateFunc) *BackupJobEventSelect { - _s.fns = append(_s.fns, fns...) - return _s -} - -// Scan applies the selector query and scans the result into the given value. 
-func (_s *BackupJobEventSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) - if err := _s.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*BackupJobEventQuery, *BackupJobEventSelect](ctx, _s.BackupJobEventQuery, _s, _s.inters, v) -} - -func (_s *BackupJobEventSelect) sqlScan(ctx context.Context, root *BackupJobEventQuery, v any) error { - selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(_s.fns)) - for _, fn := range _s.fns { - aggregation = append(aggregation, fn(selector)) - } - switch n := len(*_s.selector.flds); { - case n == 0 && len(aggregation) > 0: - selector.Select(aggregation...) - case n != 0 && len(aggregation) > 0: - selector.AppendSelect(aggregation...) - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := _s.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} diff --git a/backup/ent/backupjobevent_update.go b/backup/ent/backupjobevent_update.go deleted file mode 100644 index 937e6f11f..000000000 --- a/backup/ent/backupjobevent_update.go +++ /dev/null @@ -1,517 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupJobEventUpdate is the builder for updating BackupJobEvent entities. -type BackupJobEventUpdate struct { - config - hooks []Hook - mutation *BackupJobEventMutation -} - -// Where appends a list predicates to the BackupJobEventUpdate builder. -func (_u *BackupJobEventUpdate) Where(ps ...predicate.BackupJobEvent) *BackupJobEventUpdate { - _u.mutation.Where(ps...) 
- return _u -} - -// SetBackupJobID sets the "backup_job_id" field. -func (_u *BackupJobEventUpdate) SetBackupJobID(v int) *BackupJobEventUpdate { - _u.mutation.SetBackupJobID(v) - return _u -} - -// SetNillableBackupJobID sets the "backup_job_id" field if the given value is not nil. -func (_u *BackupJobEventUpdate) SetNillableBackupJobID(v *int) *BackupJobEventUpdate { - if v != nil { - _u.SetBackupJobID(*v) - } - return _u -} - -// SetLevel sets the "level" field. -func (_u *BackupJobEventUpdate) SetLevel(v backupjobevent.Level) *BackupJobEventUpdate { - _u.mutation.SetLevel(v) - return _u -} - -// SetNillableLevel sets the "level" field if the given value is not nil. -func (_u *BackupJobEventUpdate) SetNillableLevel(v *backupjobevent.Level) *BackupJobEventUpdate { - if v != nil { - _u.SetLevel(*v) - } - return _u -} - -// SetEventType sets the "event_type" field. -func (_u *BackupJobEventUpdate) SetEventType(v string) *BackupJobEventUpdate { - _u.mutation.SetEventType(v) - return _u -} - -// SetNillableEventType sets the "event_type" field if the given value is not nil. -func (_u *BackupJobEventUpdate) SetNillableEventType(v *string) *BackupJobEventUpdate { - if v != nil { - _u.SetEventType(*v) - } - return _u -} - -// SetMessage sets the "message" field. -func (_u *BackupJobEventUpdate) SetMessage(v string) *BackupJobEventUpdate { - _u.mutation.SetMessage(v) - return _u -} - -// SetNillableMessage sets the "message" field if the given value is not nil. -func (_u *BackupJobEventUpdate) SetNillableMessage(v *string) *BackupJobEventUpdate { - if v != nil { - _u.SetMessage(*v) - } - return _u -} - -// SetPayload sets the "payload" field. -func (_u *BackupJobEventUpdate) SetPayload(v string) *BackupJobEventUpdate { - _u.mutation.SetPayload(v) - return _u -} - -// SetNillablePayload sets the "payload" field if the given value is not nil. 
-func (_u *BackupJobEventUpdate) SetNillablePayload(v *string) *BackupJobEventUpdate { - if v != nil { - _u.SetPayload(*v) - } - return _u -} - -// ClearPayload clears the value of the "payload" field. -func (_u *BackupJobEventUpdate) ClearPayload() *BackupJobEventUpdate { - _u.mutation.ClearPayload() - return _u -} - -// SetEventTime sets the "event_time" field. -func (_u *BackupJobEventUpdate) SetEventTime(v time.Time) *BackupJobEventUpdate { - _u.mutation.SetEventTime(v) - return _u -} - -// SetNillableEventTime sets the "event_time" field if the given value is not nil. -func (_u *BackupJobEventUpdate) SetNillableEventTime(v *time.Time) *BackupJobEventUpdate { - if v != nil { - _u.SetEventTime(*v) - } - return _u -} - -// SetJobID sets the "job" edge to the BackupJob entity by ID. -func (_u *BackupJobEventUpdate) SetJobID(id int) *BackupJobEventUpdate { - _u.mutation.SetJobID(id) - return _u -} - -// SetJob sets the "job" edge to the BackupJob entity. -func (_u *BackupJobEventUpdate) SetJob(v *BackupJob) *BackupJobEventUpdate { - return _u.SetJobID(v.ID) -} - -// Mutation returns the BackupJobEventMutation object of the builder. -func (_u *BackupJobEventUpdate) Mutation() *BackupJobEventMutation { - return _u.mutation -} - -// ClearJob clears the "job" edge to the BackupJob entity. -func (_u *BackupJobEventUpdate) ClearJob() *BackupJobEventUpdate { - _u.mutation.ClearJob() - return _u -} - -// Save executes the query and returns the number of nodes affected by the update operation. -func (_u *BackupJobEventUpdate) Save(ctx context.Context) (int, error) { - return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (_u *BackupJobEventUpdate) SaveX(ctx context.Context) int { - affected, err := _u.Save(ctx) - if err != nil { - panic(err) - } - return affected -} - -// Exec executes the query. 
-func (_u *BackupJobEventUpdate) Exec(ctx context.Context) error { - _, err := _u.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_u *BackupJobEventUpdate) ExecX(ctx context.Context) { - if err := _u.Exec(ctx); err != nil { - panic(err) - } -} - -// check runs all checks and user-defined validators on the builder. -func (_u *BackupJobEventUpdate) check() error { - if v, ok := _u.mutation.Level(); ok { - if err := backupjobevent.LevelValidator(v); err != nil { - return &ValidationError{Name: "level", err: fmt.Errorf(`ent: validator failed for field "BackupJobEvent.level": %w`, err)} - } - } - if _u.mutation.JobCleared() && len(_u.mutation.JobIDs()) > 0 { - return errors.New(`ent: clearing a required unique edge "BackupJobEvent.job"`) - } - return nil -} - -func (_u *BackupJobEventUpdate) sqlSave(ctx context.Context) (_node int, err error) { - if err := _u.check(); err != nil { - return _node, err - } - _spec := sqlgraph.NewUpdateSpec(backupjobevent.Table, backupjobevent.Columns, sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt)) - if ps := _u.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := _u.mutation.Level(); ok { - _spec.SetField(backupjobevent.FieldLevel, field.TypeEnum, value) - } - if value, ok := _u.mutation.EventType(); ok { - _spec.SetField(backupjobevent.FieldEventType, field.TypeString, value) - } - if value, ok := _u.mutation.Message(); ok { - _spec.SetField(backupjobevent.FieldMessage, field.TypeString, value) - } - if value, ok := _u.mutation.Payload(); ok { - _spec.SetField(backupjobevent.FieldPayload, field.TypeString, value) - } - if _u.mutation.PayloadCleared() { - _spec.ClearField(backupjobevent.FieldPayload, field.TypeString) - } - if value, ok := _u.mutation.EventTime(); ok { - _spec.SetField(backupjobevent.FieldEventTime, field.TypeTime, value) - } - if _u.mutation.JobCleared() { - edge 
:= &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: false, - Table: backupjobevent.JobTable, - Columns: []string{backupjobevent.JobColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt), - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := _u.mutation.JobIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: false, - Table: backupjobevent.JobTable, - Columns: []string{backupjobevent.JobColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{backupjobevent.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return 0, err - } - _u.mutation.done = true - return _node, nil -} - -// BackupJobEventUpdateOne is the builder for updating a single BackupJobEvent entity. -type BackupJobEventUpdateOne struct { - config - fields []string - hooks []Hook - mutation *BackupJobEventMutation -} - -// SetBackupJobID sets the "backup_job_id" field. -func (_u *BackupJobEventUpdateOne) SetBackupJobID(v int) *BackupJobEventUpdateOne { - _u.mutation.SetBackupJobID(v) - return _u -} - -// SetNillableBackupJobID sets the "backup_job_id" field if the given value is not nil. -func (_u *BackupJobEventUpdateOne) SetNillableBackupJobID(v *int) *BackupJobEventUpdateOne { - if v != nil { - _u.SetBackupJobID(*v) - } - return _u -} - -// SetLevel sets the "level" field. 
-func (_u *BackupJobEventUpdateOne) SetLevel(v backupjobevent.Level) *BackupJobEventUpdateOne { - _u.mutation.SetLevel(v) - return _u -} - -// SetNillableLevel sets the "level" field if the given value is not nil. -func (_u *BackupJobEventUpdateOne) SetNillableLevel(v *backupjobevent.Level) *BackupJobEventUpdateOne { - if v != nil { - _u.SetLevel(*v) - } - return _u -} - -// SetEventType sets the "event_type" field. -func (_u *BackupJobEventUpdateOne) SetEventType(v string) *BackupJobEventUpdateOne { - _u.mutation.SetEventType(v) - return _u -} - -// SetNillableEventType sets the "event_type" field if the given value is not nil. -func (_u *BackupJobEventUpdateOne) SetNillableEventType(v *string) *BackupJobEventUpdateOne { - if v != nil { - _u.SetEventType(*v) - } - return _u -} - -// SetMessage sets the "message" field. -func (_u *BackupJobEventUpdateOne) SetMessage(v string) *BackupJobEventUpdateOne { - _u.mutation.SetMessage(v) - return _u -} - -// SetNillableMessage sets the "message" field if the given value is not nil. -func (_u *BackupJobEventUpdateOne) SetNillableMessage(v *string) *BackupJobEventUpdateOne { - if v != nil { - _u.SetMessage(*v) - } - return _u -} - -// SetPayload sets the "payload" field. -func (_u *BackupJobEventUpdateOne) SetPayload(v string) *BackupJobEventUpdateOne { - _u.mutation.SetPayload(v) - return _u -} - -// SetNillablePayload sets the "payload" field if the given value is not nil. -func (_u *BackupJobEventUpdateOne) SetNillablePayload(v *string) *BackupJobEventUpdateOne { - if v != nil { - _u.SetPayload(*v) - } - return _u -} - -// ClearPayload clears the value of the "payload" field. -func (_u *BackupJobEventUpdateOne) ClearPayload() *BackupJobEventUpdateOne { - _u.mutation.ClearPayload() - return _u -} - -// SetEventTime sets the "event_time" field. 
-func (_u *BackupJobEventUpdateOne) SetEventTime(v time.Time) *BackupJobEventUpdateOne { - _u.mutation.SetEventTime(v) - return _u -} - -// SetNillableEventTime sets the "event_time" field if the given value is not nil. -func (_u *BackupJobEventUpdateOne) SetNillableEventTime(v *time.Time) *BackupJobEventUpdateOne { - if v != nil { - _u.SetEventTime(*v) - } - return _u -} - -// SetJobID sets the "job" edge to the BackupJob entity by ID. -func (_u *BackupJobEventUpdateOne) SetJobID(id int) *BackupJobEventUpdateOne { - _u.mutation.SetJobID(id) - return _u -} - -// SetJob sets the "job" edge to the BackupJob entity. -func (_u *BackupJobEventUpdateOne) SetJob(v *BackupJob) *BackupJobEventUpdateOne { - return _u.SetJobID(v.ID) -} - -// Mutation returns the BackupJobEventMutation object of the builder. -func (_u *BackupJobEventUpdateOne) Mutation() *BackupJobEventMutation { - return _u.mutation -} - -// ClearJob clears the "job" edge to the BackupJob entity. -func (_u *BackupJobEventUpdateOne) ClearJob() *BackupJobEventUpdateOne { - _u.mutation.ClearJob() - return _u -} - -// Where appends a list predicates to the BackupJobEventUpdate builder. -func (_u *BackupJobEventUpdateOne) Where(ps ...predicate.BackupJobEvent) *BackupJobEventUpdateOne { - _u.mutation.Where(ps...) - return _u -} - -// Select allows selecting one or more fields (columns) of the returned entity. -// The default is selecting all fields defined in the entity schema. -func (_u *BackupJobEventUpdateOne) Select(field string, fields ...string) *BackupJobEventUpdateOne { - _u.fields = append([]string{field}, fields...) - return _u -} - -// Save executes the query and returns the updated BackupJobEvent entity. -func (_u *BackupJobEventUpdateOne) Save(ctx context.Context) (*BackupJobEvent, error) { - return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) -} - -// SaveX is like Save, but panics if an error occurs. 
-func (_u *BackupJobEventUpdateOne) SaveX(ctx context.Context) *BackupJobEvent { - node, err := _u.Save(ctx) - if err != nil { - panic(err) - } - return node -} - -// Exec executes the query on the entity. -func (_u *BackupJobEventUpdateOne) Exec(ctx context.Context) error { - _, err := _u.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_u *BackupJobEventUpdateOne) ExecX(ctx context.Context) { - if err := _u.Exec(ctx); err != nil { - panic(err) - } -} - -// check runs all checks and user-defined validators on the builder. -func (_u *BackupJobEventUpdateOne) check() error { - if v, ok := _u.mutation.Level(); ok { - if err := backupjobevent.LevelValidator(v); err != nil { - return &ValidationError{Name: "level", err: fmt.Errorf(`ent: validator failed for field "BackupJobEvent.level": %w`, err)} - } - } - if _u.mutation.JobCleared() && len(_u.mutation.JobIDs()) > 0 { - return errors.New(`ent: clearing a required unique edge "BackupJobEvent.job"`) - } - return nil -} - -func (_u *BackupJobEventUpdateOne) sqlSave(ctx context.Context) (_node *BackupJobEvent, err error) { - if err := _u.check(); err != nil { - return _node, err - } - _spec := sqlgraph.NewUpdateSpec(backupjobevent.Table, backupjobevent.Columns, sqlgraph.NewFieldSpec(backupjobevent.FieldID, field.TypeInt)) - id, ok := _u.mutation.ID() - if !ok { - return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BackupJobEvent.id" for update`)} - } - _spec.Node.ID.Value = id - if fields := _u.fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, backupjobevent.FieldID) - for _, f := range fields { - if !backupjobevent.ValidColumn(f) { - return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} - } - if f != backupjobevent.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, f) - } - } - } - if ps := _u.mutation.predicates; len(ps) > 0 { - 
_spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := _u.mutation.Level(); ok { - _spec.SetField(backupjobevent.FieldLevel, field.TypeEnum, value) - } - if value, ok := _u.mutation.EventType(); ok { - _spec.SetField(backupjobevent.FieldEventType, field.TypeString, value) - } - if value, ok := _u.mutation.Message(); ok { - _spec.SetField(backupjobevent.FieldMessage, field.TypeString, value) - } - if value, ok := _u.mutation.Payload(); ok { - _spec.SetField(backupjobevent.FieldPayload, field.TypeString, value) - } - if _u.mutation.PayloadCleared() { - _spec.ClearField(backupjobevent.FieldPayload, field.TypeString) - } - if value, ok := _u.mutation.EventTime(); ok { - _spec.SetField(backupjobevent.FieldEventTime, field.TypeTime, value) - } - if _u.mutation.JobCleared() { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: false, - Table: backupjobevent.JobTable, - Columns: []string{backupjobevent.JobColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt), - }, - } - _spec.Edges.Clear = append(_spec.Edges.Clear, edge) - } - if nodes := _u.mutation.JobIDs(); len(nodes) > 0 { - edge := &sqlgraph.EdgeSpec{ - Rel: sqlgraph.M2O, - Inverse: false, - Table: backupjobevent.JobTable, - Columns: []string{backupjobevent.JobColumn}, - Bidi: false, - Target: &sqlgraph.EdgeTarget{ - IDSpec: sqlgraph.NewFieldSpec(backupjob.FieldID, field.TypeInt), - }, - } - for _, k := range nodes { - edge.Target.Nodes = append(edge.Target.Nodes, k) - } - _spec.Edges.Add = append(_spec.Edges.Add, edge) - } - _node = &BackupJobEvent{config: _u.config} - _spec.Assign = _node.assignValues - _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{backupjobevent.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: 
err.Error(), wrap: err} - } - return nil, err - } - _u.mutation.done = true - return _node, nil -} diff --git a/backup/ent/backups3config.go b/backup/ent/backups3config.go deleted file mode 100644 index c9bfaaa3a..000000000 --- a/backup/ent/backups3config.go +++ /dev/null @@ -1,250 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "fmt" - "strings" - "time" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" -) - -// BackupS3Config is the model entity for the BackupS3Config schema. -type BackupS3Config struct { - config `json:"-"` - // ID of the ent. - ID int `json:"id,omitempty"` - // ProfileID holds the value of the "profile_id" field. - ProfileID string `json:"profile_id,omitempty"` - // Name holds the value of the "name" field. - Name string `json:"name,omitempty"` - // IsActive holds the value of the "is_active" field. - IsActive bool `json:"is_active,omitempty"` - // Enabled holds the value of the "enabled" field. - Enabled bool `json:"enabled,omitempty"` - // Endpoint holds the value of the "endpoint" field. - Endpoint string `json:"endpoint,omitempty"` - // Region holds the value of the "region" field. - Region string `json:"region,omitempty"` - // Bucket holds the value of the "bucket" field. - Bucket string `json:"bucket,omitempty"` - // AccessKeyID holds the value of the "access_key_id" field. - AccessKeyID string `json:"access_key_id,omitempty"` - // SecretAccessKeyEncrypted holds the value of the "secret_access_key_encrypted" field. - SecretAccessKeyEncrypted string `json:"-"` - // Prefix holds the value of the "prefix" field. - Prefix string `json:"prefix,omitempty"` - // ForcePathStyle holds the value of the "force_path_style" field. - ForcePathStyle bool `json:"force_path_style,omitempty"` - // UseSsl holds the value of the "use_ssl" field. - UseSsl bool `json:"use_ssl,omitempty"` - // CreatedAt holds the value of the "created_at" field. 
- CreatedAt time.Time `json:"created_at,omitempty"` - // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt time.Time `json:"updated_at,omitempty"` - selectValues sql.SelectValues -} - -// scanValues returns the types for scanning values from sql.Rows. -func (*BackupS3Config) scanValues(columns []string) ([]any, error) { - values := make([]any, len(columns)) - for i := range columns { - switch columns[i] { - case backups3config.FieldIsActive, backups3config.FieldEnabled, backups3config.FieldForcePathStyle, backups3config.FieldUseSsl: - values[i] = new(sql.NullBool) - case backups3config.FieldID: - values[i] = new(sql.NullInt64) - case backups3config.FieldProfileID, backups3config.FieldName, backups3config.FieldEndpoint, backups3config.FieldRegion, backups3config.FieldBucket, backups3config.FieldAccessKeyID, backups3config.FieldSecretAccessKeyEncrypted, backups3config.FieldPrefix: - values[i] = new(sql.NullString) - case backups3config.FieldCreatedAt, backups3config.FieldUpdatedAt: - values[i] = new(sql.NullTime) - default: - values[i] = new(sql.UnknownType) - } - } - return values, nil -} - -// assignValues assigns the values that were returned from sql.Rows (after scanning) -// to the BackupS3Config fields. 
-func (_m *BackupS3Config) assignValues(columns []string, values []any) error { - if m, n := len(values), len(columns); m < n { - return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) - } - for i := range columns { - switch columns[i] { - case backups3config.FieldID: - value, ok := values[i].(*sql.NullInt64) - if !ok { - return fmt.Errorf("unexpected type %T for field id", value) - } - _m.ID = int(value.Int64) - case backups3config.FieldProfileID: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field profile_id", values[i]) - } else if value.Valid { - _m.ProfileID = value.String - } - case backups3config.FieldName: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field name", values[i]) - } else if value.Valid { - _m.Name = value.String - } - case backups3config.FieldIsActive: - if value, ok := values[i].(*sql.NullBool); !ok { - return fmt.Errorf("unexpected type %T for field is_active", values[i]) - } else if value.Valid { - _m.IsActive = value.Bool - } - case backups3config.FieldEnabled: - if value, ok := values[i].(*sql.NullBool); !ok { - return fmt.Errorf("unexpected type %T for field enabled", values[i]) - } else if value.Valid { - _m.Enabled = value.Bool - } - case backups3config.FieldEndpoint: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field endpoint", values[i]) - } else if value.Valid { - _m.Endpoint = value.String - } - case backups3config.FieldRegion: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field region", values[i]) - } else if value.Valid { - _m.Region = value.String - } - case backups3config.FieldBucket: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field bucket", values[i]) - } else if value.Valid { - _m.Bucket = value.String - } - case backups3config.FieldAccessKeyID: - if value, ok 
:= values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field access_key_id", values[i]) - } else if value.Valid { - _m.AccessKeyID = value.String - } - case backups3config.FieldSecretAccessKeyEncrypted: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field secret_access_key_encrypted", values[i]) - } else if value.Valid { - _m.SecretAccessKeyEncrypted = value.String - } - case backups3config.FieldPrefix: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field prefix", values[i]) - } else if value.Valid { - _m.Prefix = value.String - } - case backups3config.FieldForcePathStyle: - if value, ok := values[i].(*sql.NullBool); !ok { - return fmt.Errorf("unexpected type %T for field force_path_style", values[i]) - } else if value.Valid { - _m.ForcePathStyle = value.Bool - } - case backups3config.FieldUseSsl: - if value, ok := values[i].(*sql.NullBool); !ok { - return fmt.Errorf("unexpected type %T for field use_ssl", values[i]) - } else if value.Valid { - _m.UseSsl = value.Bool - } - case backups3config.FieldCreatedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field created_at", values[i]) - } else if value.Valid { - _m.CreatedAt = value.Time - } - case backups3config.FieldUpdatedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field updated_at", values[i]) - } else if value.Valid { - _m.UpdatedAt = value.Time - } - default: - _m.selectValues.Set(columns[i], values[i]) - } - } - return nil -} - -// Value returns the ent.Value that was dynamically selected and assigned to the BackupS3Config. -// This includes values selected through modifiers, order, etc. -func (_m *BackupS3Config) Value(name string) (ent.Value, error) { - return _m.selectValues.Get(name) -} - -// Update returns a builder for updating this BackupS3Config. 
-// Note that you need to call BackupS3Config.Unwrap() before calling this method if this BackupS3Config -// was returned from a transaction, and the transaction was committed or rolled back. -func (_m *BackupS3Config) Update() *BackupS3ConfigUpdateOne { - return NewBackupS3ConfigClient(_m.config).UpdateOne(_m) -} - -// Unwrap unwraps the BackupS3Config entity that was returned from a transaction after it was closed, -// so that all future queries will be executed through the driver which created the transaction. -func (_m *BackupS3Config) Unwrap() *BackupS3Config { - _tx, ok := _m.config.driver.(*txDriver) - if !ok { - panic("ent: BackupS3Config is not a transactional entity") - } - _m.config.driver = _tx.drv - return _m -} - -// String implements the fmt.Stringer. -func (_m *BackupS3Config) String() string { - var builder strings.Builder - builder.WriteString("BackupS3Config(") - builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) - builder.WriteString("profile_id=") - builder.WriteString(_m.ProfileID) - builder.WriteString(", ") - builder.WriteString("name=") - builder.WriteString(_m.Name) - builder.WriteString(", ") - builder.WriteString("is_active=") - builder.WriteString(fmt.Sprintf("%v", _m.IsActive)) - builder.WriteString(", ") - builder.WriteString("enabled=") - builder.WriteString(fmt.Sprintf("%v", _m.Enabled)) - builder.WriteString(", ") - builder.WriteString("endpoint=") - builder.WriteString(_m.Endpoint) - builder.WriteString(", ") - builder.WriteString("region=") - builder.WriteString(_m.Region) - builder.WriteString(", ") - builder.WriteString("bucket=") - builder.WriteString(_m.Bucket) - builder.WriteString(", ") - builder.WriteString("access_key_id=") - builder.WriteString(_m.AccessKeyID) - builder.WriteString(", ") - builder.WriteString("secret_access_key_encrypted=") - builder.WriteString(", ") - builder.WriteString("prefix=") - builder.WriteString(_m.Prefix) - builder.WriteString(", ") - builder.WriteString("force_path_style=") - 
builder.WriteString(fmt.Sprintf("%v", _m.ForcePathStyle)) - builder.WriteString(", ") - builder.WriteString("use_ssl=") - builder.WriteString(fmt.Sprintf("%v", _m.UseSsl)) - builder.WriteString(", ") - builder.WriteString("created_at=") - builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) - builder.WriteString(", ") - builder.WriteString("updated_at=") - builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) - builder.WriteByte(')') - return builder.String() -} - -// BackupS3Configs is a parsable slice of BackupS3Config. -type BackupS3Configs []*BackupS3Config diff --git a/backup/ent/backups3config/backups3config.go b/backup/ent/backups3config/backups3config.go deleted file mode 100644 index 1fdef668e..000000000 --- a/backup/ent/backups3config/backups3config.go +++ /dev/null @@ -1,184 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package backups3config - -import ( - "time" - - "entgo.io/ent/dialect/sql" -) - -const ( - // Label holds the string label denoting the backups3config type in the database. - Label = "backup_s3config" - // FieldID holds the string denoting the id field in the database. - FieldID = "id" - // FieldProfileID holds the string denoting the profile_id field in the database. - FieldProfileID = "profile_id" - // FieldName holds the string denoting the name field in the database. - FieldName = "name" - // FieldIsActive holds the string denoting the is_active field in the database. - FieldIsActive = "is_active" - // FieldEnabled holds the string denoting the enabled field in the database. - FieldEnabled = "enabled" - // FieldEndpoint holds the string denoting the endpoint field in the database. - FieldEndpoint = "endpoint" - // FieldRegion holds the string denoting the region field in the database. - FieldRegion = "region" - // FieldBucket holds the string denoting the bucket field in the database. - FieldBucket = "bucket" - // FieldAccessKeyID holds the string denoting the access_key_id field in the database. 
- FieldAccessKeyID = "access_key_id" - // FieldSecretAccessKeyEncrypted holds the string denoting the secret_access_key_encrypted field in the database. - FieldSecretAccessKeyEncrypted = "secret_access_key_encrypted" - // FieldPrefix holds the string denoting the prefix field in the database. - FieldPrefix = "prefix" - // FieldForcePathStyle holds the string denoting the force_path_style field in the database. - FieldForcePathStyle = "force_path_style" - // FieldUseSsl holds the string denoting the use_ssl field in the database. - FieldUseSsl = "use_ssl" - // FieldCreatedAt holds the string denoting the created_at field in the database. - FieldCreatedAt = "created_at" - // FieldUpdatedAt holds the string denoting the updated_at field in the database. - FieldUpdatedAt = "updated_at" - // Table holds the table name of the backups3config in the database. - Table = "backup_s3configs" -) - -// Columns holds all SQL columns for backups3config fields. -var Columns = []string{ - FieldID, - FieldProfileID, - FieldName, - FieldIsActive, - FieldEnabled, - FieldEndpoint, - FieldRegion, - FieldBucket, - FieldAccessKeyID, - FieldSecretAccessKeyEncrypted, - FieldPrefix, - FieldForcePathStyle, - FieldUseSsl, - FieldCreatedAt, - FieldUpdatedAt, -} - -// ValidColumn reports if the column name is valid (part of the table columns). -func ValidColumn(column string) bool { - for i := range Columns { - if column == Columns[i] { - return true - } - } - return false -} - -var ( - // DefaultProfileID holds the default value on creation for the "profile_id" field. - DefaultProfileID string - // DefaultName holds the default value on creation for the "name" field. - DefaultName string - // DefaultIsActive holds the default value on creation for the "is_active" field. - DefaultIsActive bool - // DefaultEnabled holds the default value on creation for the "enabled" field. - DefaultEnabled bool - // DefaultEndpoint holds the default value on creation for the "endpoint" field. 
- DefaultEndpoint string - // DefaultRegion holds the default value on creation for the "region" field. - DefaultRegion string - // DefaultBucket holds the default value on creation for the "bucket" field. - DefaultBucket string - // DefaultAccessKeyID holds the default value on creation for the "access_key_id" field. - DefaultAccessKeyID string - // DefaultPrefix holds the default value on creation for the "prefix" field. - DefaultPrefix string - // DefaultForcePathStyle holds the default value on creation for the "force_path_style" field. - DefaultForcePathStyle bool - // DefaultUseSsl holds the default value on creation for the "use_ssl" field. - DefaultUseSsl bool - // DefaultCreatedAt holds the default value on creation for the "created_at" field. - DefaultCreatedAt func() time.Time - // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. - DefaultUpdatedAt func() time.Time - // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. - UpdateDefaultUpdatedAt func() time.Time -) - -// OrderOption defines the ordering options for the BackupS3Config queries. -type OrderOption func(*sql.Selector) - -// ByID orders the results by the id field. -func ByID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldID, opts...).ToFunc() -} - -// ByProfileID orders the results by the profile_id field. -func ByProfileID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldProfileID, opts...).ToFunc() -} - -// ByName orders the results by the name field. -func ByName(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldName, opts...).ToFunc() -} - -// ByIsActive orders the results by the is_active field. -func ByIsActive(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldIsActive, opts...).ToFunc() -} - -// ByEnabled orders the results by the enabled field. 
-func ByEnabled(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldEnabled, opts...).ToFunc() -} - -// ByEndpoint orders the results by the endpoint field. -func ByEndpoint(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldEndpoint, opts...).ToFunc() -} - -// ByRegion orders the results by the region field. -func ByRegion(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldRegion, opts...).ToFunc() -} - -// ByBucket orders the results by the bucket field. -func ByBucket(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldBucket, opts...).ToFunc() -} - -// ByAccessKeyID orders the results by the access_key_id field. -func ByAccessKeyID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldAccessKeyID, opts...).ToFunc() -} - -// BySecretAccessKeyEncrypted orders the results by the secret_access_key_encrypted field. -func BySecretAccessKeyEncrypted(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldSecretAccessKeyEncrypted, opts...).ToFunc() -} - -// ByPrefix orders the results by the prefix field. -func ByPrefix(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldPrefix, opts...).ToFunc() -} - -// ByForcePathStyle orders the results by the force_path_style field. -func ByForcePathStyle(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldForcePathStyle, opts...).ToFunc() -} - -// ByUseSsl orders the results by the use_ssl field. -func ByUseSsl(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldUseSsl, opts...).ToFunc() -} - -// ByCreatedAt orders the results by the created_at field. -func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() -} - -// ByUpdatedAt orders the results by the updated_at field. 
-func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() -} diff --git a/backup/ent/backups3config/where.go b/backup/ent/backups3config/where.go deleted file mode 100644 index 0673d0f8d..000000000 --- a/backup/ent/backups3config/where.go +++ /dev/null @@ -1,790 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package backups3config - -import ( - "time" - - "entgo.io/ent/dialect/sql" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// ID filters vertices based on their ID field. -func ID(id int) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldID, id)) -} - -// IDEQ applies the EQ predicate on the ID field. -func IDEQ(id int) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldID, id)) -} - -// IDNEQ applies the NEQ predicate on the ID field. -func IDNEQ(id int) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldID, id)) -} - -// IDIn applies the In predicate on the ID field. -func IDIn(ids ...int) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldID, ids...)) -} - -// IDNotIn applies the NotIn predicate on the ID field. -func IDNotIn(ids ...int) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldID, ids...)) -} - -// IDGT applies the GT predicate on the ID field. -func IDGT(id int) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldID, id)) -} - -// IDGTE applies the GTE predicate on the ID field. -func IDGTE(id int) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldID, id)) -} - -// IDLT applies the LT predicate on the ID field. -func IDLT(id int) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldID, id)) -} - -// IDLTE applies the LTE predicate on the ID field. 
-func IDLTE(id int) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldID, id)) -} - -// ProfileID applies equality check predicate on the "profile_id" field. It's identical to ProfileIDEQ. -func ProfileID(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldProfileID, v)) -} - -// Name applies equality check predicate on the "name" field. It's identical to NameEQ. -func Name(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldName, v)) -} - -// IsActive applies equality check predicate on the "is_active" field. It's identical to IsActiveEQ. -func IsActive(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldIsActive, v)) -} - -// Enabled applies equality check predicate on the "enabled" field. It's identical to EnabledEQ. -func Enabled(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldEnabled, v)) -} - -// Endpoint applies equality check predicate on the "endpoint" field. It's identical to EndpointEQ. -func Endpoint(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldEndpoint, v)) -} - -// Region applies equality check predicate on the "region" field. It's identical to RegionEQ. -func Region(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldRegion, v)) -} - -// Bucket applies equality check predicate on the "bucket" field. It's identical to BucketEQ. -func Bucket(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldBucket, v)) -} - -// AccessKeyID applies equality check predicate on the "access_key_id" field. It's identical to AccessKeyIDEQ. -func AccessKeyID(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldAccessKeyID, v)) -} - -// SecretAccessKeyEncrypted applies equality check predicate on the "secret_access_key_encrypted" field. 
It's identical to SecretAccessKeyEncryptedEQ. -func SecretAccessKeyEncrypted(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldSecretAccessKeyEncrypted, v)) -} - -// Prefix applies equality check predicate on the "prefix" field. It's identical to PrefixEQ. -func Prefix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldPrefix, v)) -} - -// ForcePathStyle applies equality check predicate on the "force_path_style" field. It's identical to ForcePathStyleEQ. -func ForcePathStyle(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldForcePathStyle, v)) -} - -// UseSsl applies equality check predicate on the "use_ssl" field. It's identical to UseSslEQ. -func UseSsl(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldUseSsl, v)) -} - -// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. -func CreatedAt(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldCreatedAt, v)) -} - -// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. -func UpdatedAt(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldUpdatedAt, v)) -} - -// ProfileIDEQ applies the EQ predicate on the "profile_id" field. -func ProfileIDEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldProfileID, v)) -} - -// ProfileIDNEQ applies the NEQ predicate on the "profile_id" field. -func ProfileIDNEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldProfileID, v)) -} - -// ProfileIDIn applies the In predicate on the "profile_id" field. 
-func ProfileIDIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldProfileID, vs...)) -} - -// ProfileIDNotIn applies the NotIn predicate on the "profile_id" field. -func ProfileIDNotIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldProfileID, vs...)) -} - -// ProfileIDGT applies the GT predicate on the "profile_id" field. -func ProfileIDGT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldProfileID, v)) -} - -// ProfileIDGTE applies the GTE predicate on the "profile_id" field. -func ProfileIDGTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldProfileID, v)) -} - -// ProfileIDLT applies the LT predicate on the "profile_id" field. -func ProfileIDLT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldProfileID, v)) -} - -// ProfileIDLTE applies the LTE predicate on the "profile_id" field. -func ProfileIDLTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldProfileID, v)) -} - -// ProfileIDContains applies the Contains predicate on the "profile_id" field. -func ProfileIDContains(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContains(FieldProfileID, v)) -} - -// ProfileIDHasPrefix applies the HasPrefix predicate on the "profile_id" field. -func ProfileIDHasPrefix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasPrefix(FieldProfileID, v)) -} - -// ProfileIDHasSuffix applies the HasSuffix predicate on the "profile_id" field. -func ProfileIDHasSuffix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasSuffix(FieldProfileID, v)) -} - -// ProfileIDEqualFold applies the EqualFold predicate on the "profile_id" field. 
-func ProfileIDEqualFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEqualFold(FieldProfileID, v)) -} - -// ProfileIDContainsFold applies the ContainsFold predicate on the "profile_id" field. -func ProfileIDContainsFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContainsFold(FieldProfileID, v)) -} - -// NameEQ applies the EQ predicate on the "name" field. -func NameEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldName, v)) -} - -// NameNEQ applies the NEQ predicate on the "name" field. -func NameNEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldName, v)) -} - -// NameIn applies the In predicate on the "name" field. -func NameIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldName, vs...)) -} - -// NameNotIn applies the NotIn predicate on the "name" field. -func NameNotIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldName, vs...)) -} - -// NameGT applies the GT predicate on the "name" field. -func NameGT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldName, v)) -} - -// NameGTE applies the GTE predicate on the "name" field. -func NameGTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldName, v)) -} - -// NameLT applies the LT predicate on the "name" field. -func NameLT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldName, v)) -} - -// NameLTE applies the LTE predicate on the "name" field. -func NameLTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldName, v)) -} - -// NameContains applies the Contains predicate on the "name" field. 
-func NameContains(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContains(FieldName, v)) -} - -// NameHasPrefix applies the HasPrefix predicate on the "name" field. -func NameHasPrefix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasPrefix(FieldName, v)) -} - -// NameHasSuffix applies the HasSuffix predicate on the "name" field. -func NameHasSuffix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasSuffix(FieldName, v)) -} - -// NameEqualFold applies the EqualFold predicate on the "name" field. -func NameEqualFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEqualFold(FieldName, v)) -} - -// NameContainsFold applies the ContainsFold predicate on the "name" field. -func NameContainsFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContainsFold(FieldName, v)) -} - -// IsActiveEQ applies the EQ predicate on the "is_active" field. -func IsActiveEQ(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldIsActive, v)) -} - -// IsActiveNEQ applies the NEQ predicate on the "is_active" field. -func IsActiveNEQ(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldIsActive, v)) -} - -// EnabledEQ applies the EQ predicate on the "enabled" field. -func EnabledEQ(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldEnabled, v)) -} - -// EnabledNEQ applies the NEQ predicate on the "enabled" field. -func EnabledNEQ(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldEnabled, v)) -} - -// EndpointEQ applies the EQ predicate on the "endpoint" field. -func EndpointEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldEndpoint, v)) -} - -// EndpointNEQ applies the NEQ predicate on the "endpoint" field. 
-func EndpointNEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldEndpoint, v)) -} - -// EndpointIn applies the In predicate on the "endpoint" field. -func EndpointIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldEndpoint, vs...)) -} - -// EndpointNotIn applies the NotIn predicate on the "endpoint" field. -func EndpointNotIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldEndpoint, vs...)) -} - -// EndpointGT applies the GT predicate on the "endpoint" field. -func EndpointGT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldEndpoint, v)) -} - -// EndpointGTE applies the GTE predicate on the "endpoint" field. -func EndpointGTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldEndpoint, v)) -} - -// EndpointLT applies the LT predicate on the "endpoint" field. -func EndpointLT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldEndpoint, v)) -} - -// EndpointLTE applies the LTE predicate on the "endpoint" field. -func EndpointLTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldEndpoint, v)) -} - -// EndpointContains applies the Contains predicate on the "endpoint" field. -func EndpointContains(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContains(FieldEndpoint, v)) -} - -// EndpointHasPrefix applies the HasPrefix predicate on the "endpoint" field. -func EndpointHasPrefix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasPrefix(FieldEndpoint, v)) -} - -// EndpointHasSuffix applies the HasSuffix predicate on the "endpoint" field. 
-func EndpointHasSuffix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasSuffix(FieldEndpoint, v)) -} - -// EndpointEqualFold applies the EqualFold predicate on the "endpoint" field. -func EndpointEqualFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEqualFold(FieldEndpoint, v)) -} - -// EndpointContainsFold applies the ContainsFold predicate on the "endpoint" field. -func EndpointContainsFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContainsFold(FieldEndpoint, v)) -} - -// RegionEQ applies the EQ predicate on the "region" field. -func RegionEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldRegion, v)) -} - -// RegionNEQ applies the NEQ predicate on the "region" field. -func RegionNEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldRegion, v)) -} - -// RegionIn applies the In predicate on the "region" field. -func RegionIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldRegion, vs...)) -} - -// RegionNotIn applies the NotIn predicate on the "region" field. -func RegionNotIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldRegion, vs...)) -} - -// RegionGT applies the GT predicate on the "region" field. -func RegionGT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldRegion, v)) -} - -// RegionGTE applies the GTE predicate on the "region" field. -func RegionGTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldRegion, v)) -} - -// RegionLT applies the LT predicate on the "region" field. -func RegionLT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldRegion, v)) -} - -// RegionLTE applies the LTE predicate on the "region" field. 
-func RegionLTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldRegion, v)) -} - -// RegionContains applies the Contains predicate on the "region" field. -func RegionContains(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContains(FieldRegion, v)) -} - -// RegionHasPrefix applies the HasPrefix predicate on the "region" field. -func RegionHasPrefix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasPrefix(FieldRegion, v)) -} - -// RegionHasSuffix applies the HasSuffix predicate on the "region" field. -func RegionHasSuffix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasSuffix(FieldRegion, v)) -} - -// RegionEqualFold applies the EqualFold predicate on the "region" field. -func RegionEqualFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEqualFold(FieldRegion, v)) -} - -// RegionContainsFold applies the ContainsFold predicate on the "region" field. -func RegionContainsFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContainsFold(FieldRegion, v)) -} - -// BucketEQ applies the EQ predicate on the "bucket" field. -func BucketEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldBucket, v)) -} - -// BucketNEQ applies the NEQ predicate on the "bucket" field. -func BucketNEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldBucket, v)) -} - -// BucketIn applies the In predicate on the "bucket" field. -func BucketIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldBucket, vs...)) -} - -// BucketNotIn applies the NotIn predicate on the "bucket" field. -func BucketNotIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldBucket, vs...)) -} - -// BucketGT applies the GT predicate on the "bucket" field. 
-func BucketGT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldBucket, v)) -} - -// BucketGTE applies the GTE predicate on the "bucket" field. -func BucketGTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldBucket, v)) -} - -// BucketLT applies the LT predicate on the "bucket" field. -func BucketLT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldBucket, v)) -} - -// BucketLTE applies the LTE predicate on the "bucket" field. -func BucketLTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldBucket, v)) -} - -// BucketContains applies the Contains predicate on the "bucket" field. -func BucketContains(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContains(FieldBucket, v)) -} - -// BucketHasPrefix applies the HasPrefix predicate on the "bucket" field. -func BucketHasPrefix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasPrefix(FieldBucket, v)) -} - -// BucketHasSuffix applies the HasSuffix predicate on the "bucket" field. -func BucketHasSuffix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasSuffix(FieldBucket, v)) -} - -// BucketEqualFold applies the EqualFold predicate on the "bucket" field. -func BucketEqualFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEqualFold(FieldBucket, v)) -} - -// BucketContainsFold applies the ContainsFold predicate on the "bucket" field. -func BucketContainsFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContainsFold(FieldBucket, v)) -} - -// AccessKeyIDEQ applies the EQ predicate on the "access_key_id" field. 
-func AccessKeyIDEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldAccessKeyID, v)) -} - -// AccessKeyIDNEQ applies the NEQ predicate on the "access_key_id" field. -func AccessKeyIDNEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldAccessKeyID, v)) -} - -// AccessKeyIDIn applies the In predicate on the "access_key_id" field. -func AccessKeyIDIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldAccessKeyID, vs...)) -} - -// AccessKeyIDNotIn applies the NotIn predicate on the "access_key_id" field. -func AccessKeyIDNotIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldAccessKeyID, vs...)) -} - -// AccessKeyIDGT applies the GT predicate on the "access_key_id" field. -func AccessKeyIDGT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldAccessKeyID, v)) -} - -// AccessKeyIDGTE applies the GTE predicate on the "access_key_id" field. -func AccessKeyIDGTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldAccessKeyID, v)) -} - -// AccessKeyIDLT applies the LT predicate on the "access_key_id" field. -func AccessKeyIDLT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldAccessKeyID, v)) -} - -// AccessKeyIDLTE applies the LTE predicate on the "access_key_id" field. -func AccessKeyIDLTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldAccessKeyID, v)) -} - -// AccessKeyIDContains applies the Contains predicate on the "access_key_id" field. -func AccessKeyIDContains(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContains(FieldAccessKeyID, v)) -} - -// AccessKeyIDHasPrefix applies the HasPrefix predicate on the "access_key_id" field. 
-func AccessKeyIDHasPrefix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasPrefix(FieldAccessKeyID, v)) -} - -// AccessKeyIDHasSuffix applies the HasSuffix predicate on the "access_key_id" field. -func AccessKeyIDHasSuffix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasSuffix(FieldAccessKeyID, v)) -} - -// AccessKeyIDEqualFold applies the EqualFold predicate on the "access_key_id" field. -func AccessKeyIDEqualFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEqualFold(FieldAccessKeyID, v)) -} - -// AccessKeyIDContainsFold applies the ContainsFold predicate on the "access_key_id" field. -func AccessKeyIDContainsFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContainsFold(FieldAccessKeyID, v)) -} - -// SecretAccessKeyEncryptedEQ applies the EQ predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldSecretAccessKeyEncrypted, v)) -} - -// SecretAccessKeyEncryptedNEQ applies the NEQ predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedNEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldSecretAccessKeyEncrypted, v)) -} - -// SecretAccessKeyEncryptedIn applies the In predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldSecretAccessKeyEncrypted, vs...)) -} - -// SecretAccessKeyEncryptedNotIn applies the NotIn predicate on the "secret_access_key_encrypted" field. 
-func SecretAccessKeyEncryptedNotIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldSecretAccessKeyEncrypted, vs...)) -} - -// SecretAccessKeyEncryptedGT applies the GT predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedGT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldSecretAccessKeyEncrypted, v)) -} - -// SecretAccessKeyEncryptedGTE applies the GTE predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedGTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldSecretAccessKeyEncrypted, v)) -} - -// SecretAccessKeyEncryptedLT applies the LT predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedLT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldSecretAccessKeyEncrypted, v)) -} - -// SecretAccessKeyEncryptedLTE applies the LTE predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedLTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldSecretAccessKeyEncrypted, v)) -} - -// SecretAccessKeyEncryptedContains applies the Contains predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedContains(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContains(FieldSecretAccessKeyEncrypted, v)) -} - -// SecretAccessKeyEncryptedHasPrefix applies the HasPrefix predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedHasPrefix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasPrefix(FieldSecretAccessKeyEncrypted, v)) -} - -// SecretAccessKeyEncryptedHasSuffix applies the HasSuffix predicate on the "secret_access_key_encrypted" field. 
-func SecretAccessKeyEncryptedHasSuffix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasSuffix(FieldSecretAccessKeyEncrypted, v)) -} - -// SecretAccessKeyEncryptedIsNil applies the IsNil predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedIsNil() predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIsNull(FieldSecretAccessKeyEncrypted)) -} - -// SecretAccessKeyEncryptedNotNil applies the NotNil predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedNotNil() predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotNull(FieldSecretAccessKeyEncrypted)) -} - -// SecretAccessKeyEncryptedEqualFold applies the EqualFold predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedEqualFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEqualFold(FieldSecretAccessKeyEncrypted, v)) -} - -// SecretAccessKeyEncryptedContainsFold applies the ContainsFold predicate on the "secret_access_key_encrypted" field. -func SecretAccessKeyEncryptedContainsFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContainsFold(FieldSecretAccessKeyEncrypted, v)) -} - -// PrefixEQ applies the EQ predicate on the "prefix" field. -func PrefixEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldPrefix, v)) -} - -// PrefixNEQ applies the NEQ predicate on the "prefix" field. -func PrefixNEQ(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldPrefix, v)) -} - -// PrefixIn applies the In predicate on the "prefix" field. -func PrefixIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldPrefix, vs...)) -} - -// PrefixNotIn applies the NotIn predicate on the "prefix" field. 
-func PrefixNotIn(vs ...string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldPrefix, vs...)) -} - -// PrefixGT applies the GT predicate on the "prefix" field. -func PrefixGT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldPrefix, v)) -} - -// PrefixGTE applies the GTE predicate on the "prefix" field. -func PrefixGTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldPrefix, v)) -} - -// PrefixLT applies the LT predicate on the "prefix" field. -func PrefixLT(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldPrefix, v)) -} - -// PrefixLTE applies the LTE predicate on the "prefix" field. -func PrefixLTE(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldPrefix, v)) -} - -// PrefixContains applies the Contains predicate on the "prefix" field. -func PrefixContains(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContains(FieldPrefix, v)) -} - -// PrefixHasPrefix applies the HasPrefix predicate on the "prefix" field. -func PrefixHasPrefix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasPrefix(FieldPrefix, v)) -} - -// PrefixHasSuffix applies the HasSuffix predicate on the "prefix" field. -func PrefixHasSuffix(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldHasSuffix(FieldPrefix, v)) -} - -// PrefixEqualFold applies the EqualFold predicate on the "prefix" field. -func PrefixEqualFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEqualFold(FieldPrefix, v)) -} - -// PrefixContainsFold applies the ContainsFold predicate on the "prefix" field. -func PrefixContainsFold(v string) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldContainsFold(FieldPrefix, v)) -} - -// ForcePathStyleEQ applies the EQ predicate on the "force_path_style" field. 
-func ForcePathStyleEQ(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldForcePathStyle, v)) -} - -// ForcePathStyleNEQ applies the NEQ predicate on the "force_path_style" field. -func ForcePathStyleNEQ(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldForcePathStyle, v)) -} - -// UseSslEQ applies the EQ predicate on the "use_ssl" field. -func UseSslEQ(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldUseSsl, v)) -} - -// UseSslNEQ applies the NEQ predicate on the "use_ssl" field. -func UseSslNEQ(v bool) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldUseSsl, v)) -} - -// CreatedAtEQ applies the EQ predicate on the "created_at" field. -func CreatedAtEQ(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldCreatedAt, v)) -} - -// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. -func CreatedAtNEQ(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldCreatedAt, v)) -} - -// CreatedAtIn applies the In predicate on the "created_at" field. -func CreatedAtIn(vs ...time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldCreatedAt, vs...)) -} - -// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. -func CreatedAtNotIn(vs ...time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldCreatedAt, vs...)) -} - -// CreatedAtGT applies the GT predicate on the "created_at" field. -func CreatedAtGT(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldCreatedAt, v)) -} - -// CreatedAtGTE applies the GTE predicate on the "created_at" field. 
-func CreatedAtGTE(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldCreatedAt, v)) -} - -// CreatedAtLT applies the LT predicate on the "created_at" field. -func CreatedAtLT(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldCreatedAt, v)) -} - -// CreatedAtLTE applies the LTE predicate on the "created_at" field. -func CreatedAtLTE(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldCreatedAt, v)) -} - -// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. -func UpdatedAtEQ(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldEQ(FieldUpdatedAt, v)) -} - -// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. -func UpdatedAtNEQ(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNEQ(FieldUpdatedAt, v)) -} - -// UpdatedAtIn applies the In predicate on the "updated_at" field. -func UpdatedAtIn(vs ...time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldIn(FieldUpdatedAt, vs...)) -} - -// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. -func UpdatedAtNotIn(vs ...time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldNotIn(FieldUpdatedAt, vs...)) -} - -// UpdatedAtGT applies the GT predicate on the "updated_at" field. -func UpdatedAtGT(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGT(FieldUpdatedAt, v)) -} - -// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. -func UpdatedAtGTE(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldGTE(FieldUpdatedAt, v)) -} - -// UpdatedAtLT applies the LT predicate on the "updated_at" field. 
-func UpdatedAtLT(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLT(FieldUpdatedAt, v)) -} - -// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. -func UpdatedAtLTE(v time.Time) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.FieldLTE(FieldUpdatedAt, v)) -} - -// And groups predicates with the AND operator between them. -func And(predicates ...predicate.BackupS3Config) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.AndPredicates(predicates...)) -} - -// Or groups predicates with the OR operator between them. -func Or(predicates ...predicate.BackupS3Config) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.OrPredicates(predicates...)) -} - -// Not applies the not operator on the given predicate. -func Not(p predicate.BackupS3Config) predicate.BackupS3Config { - return predicate.BackupS3Config(sql.NotPredicates(p)) -} diff --git a/backup/ent/backups3config_create.go b/backup/ent/backups3config_create.go deleted file mode 100644 index d4421b7a7..000000000 --- a/backup/ent/backups3config_create.go +++ /dev/null @@ -1,520 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" -) - -// BackupS3ConfigCreate is the builder for creating a BackupS3Config entity. -type BackupS3ConfigCreate struct { - config - mutation *BackupS3ConfigMutation - hooks []Hook -} - -// SetProfileID sets the "profile_id" field. -func (_c *BackupS3ConfigCreate) SetProfileID(v string) *BackupS3ConfigCreate { - _c.mutation.SetProfileID(v) - return _c -} - -// SetNillableProfileID sets the "profile_id" field if the given value is not nil. 
-func (_c *BackupS3ConfigCreate) SetNillableProfileID(v *string) *BackupS3ConfigCreate { - if v != nil { - _c.SetProfileID(*v) - } - return _c -} - -// SetName sets the "name" field. -func (_c *BackupS3ConfigCreate) SetName(v string) *BackupS3ConfigCreate { - _c.mutation.SetName(v) - return _c -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableName(v *string) *BackupS3ConfigCreate { - if v != nil { - _c.SetName(*v) - } - return _c -} - -// SetIsActive sets the "is_active" field. -func (_c *BackupS3ConfigCreate) SetIsActive(v bool) *BackupS3ConfigCreate { - _c.mutation.SetIsActive(v) - return _c -} - -// SetNillableIsActive sets the "is_active" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableIsActive(v *bool) *BackupS3ConfigCreate { - if v != nil { - _c.SetIsActive(*v) - } - return _c -} - -// SetEnabled sets the "enabled" field. -func (_c *BackupS3ConfigCreate) SetEnabled(v bool) *BackupS3ConfigCreate { - _c.mutation.SetEnabled(v) - return _c -} - -// SetNillableEnabled sets the "enabled" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableEnabled(v *bool) *BackupS3ConfigCreate { - if v != nil { - _c.SetEnabled(*v) - } - return _c -} - -// SetEndpoint sets the "endpoint" field. -func (_c *BackupS3ConfigCreate) SetEndpoint(v string) *BackupS3ConfigCreate { - _c.mutation.SetEndpoint(v) - return _c -} - -// SetNillableEndpoint sets the "endpoint" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableEndpoint(v *string) *BackupS3ConfigCreate { - if v != nil { - _c.SetEndpoint(*v) - } - return _c -} - -// SetRegion sets the "region" field. -func (_c *BackupS3ConfigCreate) SetRegion(v string) *BackupS3ConfigCreate { - _c.mutation.SetRegion(v) - return _c -} - -// SetNillableRegion sets the "region" field if the given value is not nil. 
-func (_c *BackupS3ConfigCreate) SetNillableRegion(v *string) *BackupS3ConfigCreate { - if v != nil { - _c.SetRegion(*v) - } - return _c -} - -// SetBucket sets the "bucket" field. -func (_c *BackupS3ConfigCreate) SetBucket(v string) *BackupS3ConfigCreate { - _c.mutation.SetBucket(v) - return _c -} - -// SetNillableBucket sets the "bucket" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableBucket(v *string) *BackupS3ConfigCreate { - if v != nil { - _c.SetBucket(*v) - } - return _c -} - -// SetAccessKeyID sets the "access_key_id" field. -func (_c *BackupS3ConfigCreate) SetAccessKeyID(v string) *BackupS3ConfigCreate { - _c.mutation.SetAccessKeyID(v) - return _c -} - -// SetNillableAccessKeyID sets the "access_key_id" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableAccessKeyID(v *string) *BackupS3ConfigCreate { - if v != nil { - _c.SetAccessKeyID(*v) - } - return _c -} - -// SetSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field. -func (_c *BackupS3ConfigCreate) SetSecretAccessKeyEncrypted(v string) *BackupS3ConfigCreate { - _c.mutation.SetSecretAccessKeyEncrypted(v) - return _c -} - -// SetNillableSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableSecretAccessKeyEncrypted(v *string) *BackupS3ConfigCreate { - if v != nil { - _c.SetSecretAccessKeyEncrypted(*v) - } - return _c -} - -// SetPrefix sets the "prefix" field. -func (_c *BackupS3ConfigCreate) SetPrefix(v string) *BackupS3ConfigCreate { - _c.mutation.SetPrefix(v) - return _c -} - -// SetNillablePrefix sets the "prefix" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillablePrefix(v *string) *BackupS3ConfigCreate { - if v != nil { - _c.SetPrefix(*v) - } - return _c -} - -// SetForcePathStyle sets the "force_path_style" field. 
-func (_c *BackupS3ConfigCreate) SetForcePathStyle(v bool) *BackupS3ConfigCreate { - _c.mutation.SetForcePathStyle(v) - return _c -} - -// SetNillableForcePathStyle sets the "force_path_style" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableForcePathStyle(v *bool) *BackupS3ConfigCreate { - if v != nil { - _c.SetForcePathStyle(*v) - } - return _c -} - -// SetUseSsl sets the "use_ssl" field. -func (_c *BackupS3ConfigCreate) SetUseSsl(v bool) *BackupS3ConfigCreate { - _c.mutation.SetUseSsl(v) - return _c -} - -// SetNillableUseSsl sets the "use_ssl" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableUseSsl(v *bool) *BackupS3ConfigCreate { - if v != nil { - _c.SetUseSsl(*v) - } - return _c -} - -// SetCreatedAt sets the "created_at" field. -func (_c *BackupS3ConfigCreate) SetCreatedAt(v time.Time) *BackupS3ConfigCreate { - _c.mutation.SetCreatedAt(v) - return _c -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableCreatedAt(v *time.Time) *BackupS3ConfigCreate { - if v != nil { - _c.SetCreatedAt(*v) - } - return _c -} - -// SetUpdatedAt sets the "updated_at" field. -func (_c *BackupS3ConfigCreate) SetUpdatedAt(v time.Time) *BackupS3ConfigCreate { - _c.mutation.SetUpdatedAt(v) - return _c -} - -// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (_c *BackupS3ConfigCreate) SetNillableUpdatedAt(v *time.Time) *BackupS3ConfigCreate { - if v != nil { - _c.SetUpdatedAt(*v) - } - return _c -} - -// Mutation returns the BackupS3ConfigMutation object of the builder. -func (_c *BackupS3ConfigCreate) Mutation() *BackupS3ConfigMutation { - return _c.mutation -} - -// Save creates the BackupS3Config in the database. 
-func (_c *BackupS3ConfigCreate) Save(ctx context.Context) (*BackupS3Config, error) { - _c.defaults() - return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) -} - -// SaveX calls Save and panics if Save returns an error. -func (_c *BackupS3ConfigCreate) SaveX(ctx context.Context) *BackupS3Config { - v, err := _c.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. -func (_c *BackupS3ConfigCreate) Exec(ctx context.Context) error { - _, err := _c.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_c *BackupS3ConfigCreate) ExecX(ctx context.Context) { - if err := _c.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_c *BackupS3ConfigCreate) defaults() { - if _, ok := _c.mutation.ProfileID(); !ok { - v := backups3config.DefaultProfileID - _c.mutation.SetProfileID(v) - } - if _, ok := _c.mutation.Name(); !ok { - v := backups3config.DefaultName - _c.mutation.SetName(v) - } - if _, ok := _c.mutation.IsActive(); !ok { - v := backups3config.DefaultIsActive - _c.mutation.SetIsActive(v) - } - if _, ok := _c.mutation.Enabled(); !ok { - v := backups3config.DefaultEnabled - _c.mutation.SetEnabled(v) - } - if _, ok := _c.mutation.Endpoint(); !ok { - v := backups3config.DefaultEndpoint - _c.mutation.SetEndpoint(v) - } - if _, ok := _c.mutation.Region(); !ok { - v := backups3config.DefaultRegion - _c.mutation.SetRegion(v) - } - if _, ok := _c.mutation.Bucket(); !ok { - v := backups3config.DefaultBucket - _c.mutation.SetBucket(v) - } - if _, ok := _c.mutation.AccessKeyID(); !ok { - v := backups3config.DefaultAccessKeyID - _c.mutation.SetAccessKeyID(v) - } - if _, ok := _c.mutation.Prefix(); !ok { - v := backups3config.DefaultPrefix - _c.mutation.SetPrefix(v) - } - if _, ok := _c.mutation.ForcePathStyle(); !ok { - v := backups3config.DefaultForcePathStyle - _c.mutation.SetForcePathStyle(v) - } - if _, ok := _c.mutation.UseSsl(); !ok { - v 
:= backups3config.DefaultUseSsl - _c.mutation.SetUseSsl(v) - } - if _, ok := _c.mutation.CreatedAt(); !ok { - v := backups3config.DefaultCreatedAt() - _c.mutation.SetCreatedAt(v) - } - if _, ok := _c.mutation.UpdatedAt(); !ok { - v := backups3config.DefaultUpdatedAt() - _c.mutation.SetUpdatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. -func (_c *BackupS3ConfigCreate) check() error { - if _, ok := _c.mutation.ProfileID(); !ok { - return &ValidationError{Name: "profile_id", err: errors.New(`ent: missing required field "BackupS3Config.profile_id"`)} - } - if _, ok := _c.mutation.Name(); !ok { - return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "BackupS3Config.name"`)} - } - if _, ok := _c.mutation.IsActive(); !ok { - return &ValidationError{Name: "is_active", err: errors.New(`ent: missing required field "BackupS3Config.is_active"`)} - } - if _, ok := _c.mutation.Enabled(); !ok { - return &ValidationError{Name: "enabled", err: errors.New(`ent: missing required field "BackupS3Config.enabled"`)} - } - if _, ok := _c.mutation.Endpoint(); !ok { - return &ValidationError{Name: "endpoint", err: errors.New(`ent: missing required field "BackupS3Config.endpoint"`)} - } - if _, ok := _c.mutation.Region(); !ok { - return &ValidationError{Name: "region", err: errors.New(`ent: missing required field "BackupS3Config.region"`)} - } - if _, ok := _c.mutation.Bucket(); !ok { - return &ValidationError{Name: "bucket", err: errors.New(`ent: missing required field "BackupS3Config.bucket"`)} - } - if _, ok := _c.mutation.AccessKeyID(); !ok { - return &ValidationError{Name: "access_key_id", err: errors.New(`ent: missing required field "BackupS3Config.access_key_id"`)} - } - if _, ok := _c.mutation.Prefix(); !ok { - return &ValidationError{Name: "prefix", err: errors.New(`ent: missing required field "BackupS3Config.prefix"`)} - } - if _, ok := _c.mutation.ForcePathStyle(); !ok { - return &ValidationError{Name: 
"force_path_style", err: errors.New(`ent: missing required field "BackupS3Config.force_path_style"`)} - } - if _, ok := _c.mutation.UseSsl(); !ok { - return &ValidationError{Name: "use_ssl", err: errors.New(`ent: missing required field "BackupS3Config.use_ssl"`)} - } - if _, ok := _c.mutation.CreatedAt(); !ok { - return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "BackupS3Config.created_at"`)} - } - if _, ok := _c.mutation.UpdatedAt(); !ok { - return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "BackupS3Config.updated_at"`)} - } - return nil -} - -func (_c *BackupS3ConfigCreate) sqlSave(ctx context.Context) (*BackupS3Config, error) { - if err := _c.check(); err != nil { - return nil, err - } - _node, _spec := _c.createSpec() - if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - id := _spec.ID.Value.(int64) - _node.ID = int(id) - _c.mutation.id = &_node.ID - _c.mutation.done = true - return _node, nil -} - -func (_c *BackupS3ConfigCreate) createSpec() (*BackupS3Config, *sqlgraph.CreateSpec) { - var ( - _node = &BackupS3Config{config: _c.config} - _spec = sqlgraph.NewCreateSpec(backups3config.Table, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) - ) - if value, ok := _c.mutation.ProfileID(); ok { - _spec.SetField(backups3config.FieldProfileID, field.TypeString, value) - _node.ProfileID = value - } - if value, ok := _c.mutation.Name(); ok { - _spec.SetField(backups3config.FieldName, field.TypeString, value) - _node.Name = value - } - if value, ok := _c.mutation.IsActive(); ok { - _spec.SetField(backups3config.FieldIsActive, field.TypeBool, value) - _node.IsActive = value - } - if value, ok := _c.mutation.Enabled(); ok { - _spec.SetField(backups3config.FieldEnabled, field.TypeBool, value) - _node.Enabled = value - } - if value, ok := 
_c.mutation.Endpoint(); ok { - _spec.SetField(backups3config.FieldEndpoint, field.TypeString, value) - _node.Endpoint = value - } - if value, ok := _c.mutation.Region(); ok { - _spec.SetField(backups3config.FieldRegion, field.TypeString, value) - _node.Region = value - } - if value, ok := _c.mutation.Bucket(); ok { - _spec.SetField(backups3config.FieldBucket, field.TypeString, value) - _node.Bucket = value - } - if value, ok := _c.mutation.AccessKeyID(); ok { - _spec.SetField(backups3config.FieldAccessKeyID, field.TypeString, value) - _node.AccessKeyID = value - } - if value, ok := _c.mutation.SecretAccessKeyEncrypted(); ok { - _spec.SetField(backups3config.FieldSecretAccessKeyEncrypted, field.TypeString, value) - _node.SecretAccessKeyEncrypted = value - } - if value, ok := _c.mutation.Prefix(); ok { - _spec.SetField(backups3config.FieldPrefix, field.TypeString, value) - _node.Prefix = value - } - if value, ok := _c.mutation.ForcePathStyle(); ok { - _spec.SetField(backups3config.FieldForcePathStyle, field.TypeBool, value) - _node.ForcePathStyle = value - } - if value, ok := _c.mutation.UseSsl(); ok { - _spec.SetField(backups3config.FieldUseSsl, field.TypeBool, value) - _node.UseSsl = value - } - if value, ok := _c.mutation.CreatedAt(); ok { - _spec.SetField(backups3config.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = value - } - if value, ok := _c.mutation.UpdatedAt(); ok { - _spec.SetField(backups3config.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = value - } - return _node, _spec -} - -// BackupS3ConfigCreateBulk is the builder for creating many BackupS3Config entities in bulk. -type BackupS3ConfigCreateBulk struct { - config - err error - builders []*BackupS3ConfigCreate -} - -// Save creates the BackupS3Config entities in the database. 
-func (_c *BackupS3ConfigCreateBulk) Save(ctx context.Context) ([]*BackupS3Config, error) { - if _c.err != nil { - return nil, _c.err - } - specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) - nodes := make([]*BackupS3Config, len(_c.builders)) - mutators := make([]Mutator, len(_c.builders)) - for i := range _c.builders { - func(i int, root context.Context) { - builder := _c.builders[i] - builder.defaults() - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BackupS3ConfigMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err := builder.check(); err != nil { - return nil, err - } - builder.mutation = mutation - var err error - nodes[i], specs[i] = builder.createSpec() - if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) - } else { - spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - } - } - if err != nil { - return nil, err - } - mutation.id = &nodes[i].ID - if specs[i].ID.Value != nil { - id := specs[i].ID.Value.(int64) - nodes[i].ID = int(id) - } - mutation.done = true - return nodes[i], nil - }) - for i := len(builder.hooks) - 1; i >= 0; i-- { - mut = builder.hooks[i](mut) - } - mutators[i] = mut - }(i, ctx) - } - if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { - return nil, err - } - } - return nodes, nil -} - -// SaveX is like Save, but panics if an error occurs. -func (_c *BackupS3ConfigCreateBulk) SaveX(ctx context.Context) []*BackupS3Config { - v, err := _c.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. 
-func (_c *BackupS3ConfigCreateBulk) Exec(ctx context.Context) error { - _, err := _c.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_c *BackupS3ConfigCreateBulk) ExecX(ctx context.Context) { - if err := _c.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/backup/ent/backups3config_delete.go b/backup/ent/backups3config_delete.go deleted file mode 100644 index 0cfbbc196..000000000 --- a/backup/ent/backups3config_delete.go +++ /dev/null @@ -1,88 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupS3ConfigDelete is the builder for deleting a BackupS3Config entity. -type BackupS3ConfigDelete struct { - config - hooks []Hook - mutation *BackupS3ConfigMutation -} - -// Where appends a list predicates to the BackupS3ConfigDelete builder. -func (_d *BackupS3ConfigDelete) Where(ps ...predicate.BackupS3Config) *BackupS3ConfigDelete { - _d.mutation.Where(ps...) - return _d -} - -// Exec executes the deletion query and returns how many vertices were deleted. -func (_d *BackupS3ConfigDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) -} - -// ExecX is like Exec, but panics if an error occurs. 
-func (_d *BackupS3ConfigDelete) ExecX(ctx context.Context) int { - n, err := _d.Exec(ctx) - if err != nil { - panic(err) - } - return n -} - -func (_d *BackupS3ConfigDelete) sqlExec(ctx context.Context) (int, error) { - _spec := sqlgraph.NewDeleteSpec(backups3config.Table, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) - if ps := _d.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) - if err != nil && sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - _d.mutation.done = true - return affected, err -} - -// BackupS3ConfigDeleteOne is the builder for deleting a single BackupS3Config entity. -type BackupS3ConfigDeleteOne struct { - _d *BackupS3ConfigDelete -} - -// Where appends a list predicates to the BackupS3ConfigDelete builder. -func (_d *BackupS3ConfigDeleteOne) Where(ps ...predicate.BackupS3Config) *BackupS3ConfigDeleteOne { - _d._d.mutation.Where(ps...) - return _d -} - -// Exec executes the deletion query. -func (_d *BackupS3ConfigDeleteOne) Exec(ctx context.Context) error { - n, err := _d._d.Exec(ctx) - switch { - case err != nil: - return err - case n == 0: - return &NotFoundError{backups3config.Label} - default: - return nil - } -} - -// ExecX is like Exec, but panics if an error occurs. -func (_d *BackupS3ConfigDeleteOne) ExecX(ctx context.Context) { - if err := _d.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/backup/ent/backups3config_query.go b/backup/ent/backups3config_query.go deleted file mode 100644 index 9fdd0f128..000000000 --- a/backup/ent/backups3config_query.go +++ /dev/null @@ -1,527 +0,0 @@ -// Code generated by ent, DO NOT EDIT. 
- -package ent - -import ( - "context" - "fmt" - "math" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupS3ConfigQuery is the builder for querying BackupS3Config entities. -type BackupS3ConfigQuery struct { - config - ctx *QueryContext - order []backups3config.OrderOption - inters []Interceptor - predicates []predicate.BackupS3Config - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Where adds a new predicate for the BackupS3ConfigQuery builder. -func (_q *BackupS3ConfigQuery) Where(ps ...predicate.BackupS3Config) *BackupS3ConfigQuery { - _q.predicates = append(_q.predicates, ps...) - return _q -} - -// Limit the number of records to be returned by this query. -func (_q *BackupS3ConfigQuery) Limit(limit int) *BackupS3ConfigQuery { - _q.ctx.Limit = &limit - return _q -} - -// Offset to start from. -func (_q *BackupS3ConfigQuery) Offset(offset int) *BackupS3ConfigQuery { - _q.ctx.Offset = &offset - return _q -} - -// Unique configures the query builder to filter duplicate records on query. -// By default, unique is set to true, and can be disabled using this method. -func (_q *BackupS3ConfigQuery) Unique(unique bool) *BackupS3ConfigQuery { - _q.ctx.Unique = &unique - return _q -} - -// Order specifies how the records should be ordered. -func (_q *BackupS3ConfigQuery) Order(o ...backups3config.OrderOption) *BackupS3ConfigQuery { - _q.order = append(_q.order, o...) - return _q -} - -// First returns the first BackupS3Config entity from the query. -// Returns a *NotFoundError when no BackupS3Config was found. 
-func (_q *BackupS3ConfigQuery) First(ctx context.Context) (*BackupS3Config, error) { - nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) - if err != nil { - return nil, err - } - if len(nodes) == 0 { - return nil, &NotFoundError{backups3config.Label} - } - return nodes[0], nil -} - -// FirstX is like First, but panics if an error occurs. -func (_q *BackupS3ConfigQuery) FirstX(ctx context.Context) *BackupS3Config { - node, err := _q.First(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return node -} - -// FirstID returns the first BackupS3Config ID from the query. -// Returns a *NotFoundError when no BackupS3Config ID was found. -func (_q *BackupS3ConfigQuery) FirstID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { - return - } - if len(ids) == 0 { - err = &NotFoundError{backups3config.Label} - return - } - return ids[0], nil -} - -// FirstIDX is like FirstID, but panics if an error occurs. -func (_q *BackupS3ConfigQuery) FirstIDX(ctx context.Context) int { - id, err := _q.FirstID(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return id -} - -// Only returns a single BackupS3Config entity found by the query, ensuring it only returns one. -// Returns a *NotSingularError when more than one BackupS3Config entity is found. -// Returns a *NotFoundError when no BackupS3Config entities are found. -func (_q *BackupS3ConfigQuery) Only(ctx context.Context) (*BackupS3Config, error) { - nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) - if err != nil { - return nil, err - } - switch len(nodes) { - case 1: - return nodes[0], nil - case 0: - return nil, &NotFoundError{backups3config.Label} - default: - return nil, &NotSingularError{backups3config.Label} - } -} - -// OnlyX is like Only, but panics if an error occurs. 
-func (_q *BackupS3ConfigQuery) OnlyX(ctx context.Context) *BackupS3Config { - node, err := _q.Only(ctx) - if err != nil { - panic(err) - } - return node -} - -// OnlyID is like Only, but returns the only BackupS3Config ID in the query. -// Returns a *NotSingularError when more than one BackupS3Config ID is found. -// Returns a *NotFoundError when no entities are found. -func (_q *BackupS3ConfigQuery) OnlyID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { - return - } - switch len(ids) { - case 1: - id = ids[0] - case 0: - err = &NotFoundError{backups3config.Label} - default: - err = &NotSingularError{backups3config.Label} - } - return -} - -// OnlyIDX is like OnlyID, but panics if an error occurs. -func (_q *BackupS3ConfigQuery) OnlyIDX(ctx context.Context) int { - id, err := _q.OnlyID(ctx) - if err != nil { - panic(err) - } - return id -} - -// All executes the query and returns a list of BackupS3Configs. -func (_q *BackupS3ConfigQuery) All(ctx context.Context) ([]*BackupS3Config, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) - if err := _q.prepareQuery(ctx); err != nil { - return nil, err - } - qr := querierAll[[]*BackupS3Config, *BackupS3ConfigQuery]() - return withInterceptors[[]*BackupS3Config](ctx, _q, qr, _q.inters) -} - -// AllX is like All, but panics if an error occurs. -func (_q *BackupS3ConfigQuery) AllX(ctx context.Context) []*BackupS3Config { - nodes, err := _q.All(ctx) - if err != nil { - panic(err) - } - return nodes -} - -// IDs executes the query and returns a list of BackupS3Config IDs. 
-func (_q *BackupS3ConfigQuery) IDs(ctx context.Context) (ids []int, err error) { - if _q.ctx.Unique == nil && _q.path != nil { - _q.Unique(true) - } - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) - if err = _q.Select(backups3config.FieldID).Scan(ctx, &ids); err != nil { - return nil, err - } - return ids, nil -} - -// IDsX is like IDs, but panics if an error occurs. -func (_q *BackupS3ConfigQuery) IDsX(ctx context.Context) []int { - ids, err := _q.IDs(ctx) - if err != nil { - panic(err) - } - return ids -} - -// Count returns the count of the given query. -func (_q *BackupS3ConfigQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) - if err := _q.prepareQuery(ctx); err != nil { - return 0, err - } - return withInterceptors[int](ctx, _q, querierCount[*BackupS3ConfigQuery](), _q.inters) -} - -// CountX is like Count, but panics if an error occurs. -func (_q *BackupS3ConfigQuery) CountX(ctx context.Context) int { - count, err := _q.Count(ctx) - if err != nil { - panic(err) - } - return count -} - -// Exist returns true if the query has elements in the graph. -func (_q *BackupS3ConfigQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) - switch _, err := _q.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - -// ExistX is like Exist, but panics if an error occurs. -func (_q *BackupS3ConfigQuery) ExistX(ctx context.Context) bool { - exist, err := _q.Exist(ctx) - if err != nil { - panic(err) - } - return exist -} - -// Clone returns a duplicate of the BackupS3ConfigQuery builder, including all associated steps. It can be -// used to prepare common query builders and use them differently after the clone is made. 
-func (_q *BackupS3ConfigQuery) Clone() *BackupS3ConfigQuery { - if _q == nil { - return nil - } - return &BackupS3ConfigQuery{ - config: _q.config, - ctx: _q.ctx.Clone(), - order: append([]backups3config.OrderOption{}, _q.order...), - inters: append([]Interceptor{}, _q.inters...), - predicates: append([]predicate.BackupS3Config{}, _q.predicates...), - // clone intermediate query. - sql: _q.sql.Clone(), - path: _q.path, - } -} - -// GroupBy is used to group vertices by one or more fields/columns. -// It is often used with aggregate functions, like: count, max, mean, min, sum. -// -// Example: -// -// var v []struct { -// ProfileID string `json:"profile_id,omitempty"` -// Count int `json:"count,omitempty"` -// } -// -// client.BackupS3Config.Query(). -// GroupBy(backups3config.FieldProfileID). -// Aggregate(ent.Count()). -// Scan(ctx, &v) -func (_q *BackupS3ConfigQuery) GroupBy(field string, fields ...string) *BackupS3ConfigGroupBy { - _q.ctx.Fields = append([]string{field}, fields...) - grbuild := &BackupS3ConfigGroupBy{build: _q} - grbuild.flds = &_q.ctx.Fields - grbuild.label = backups3config.Label - grbuild.scan = grbuild.Scan - return grbuild -} - -// Select allows the selection one or more fields/columns for the given query, -// instead of selecting all fields in the entity. -// -// Example: -// -// var v []struct { -// ProfileID string `json:"profile_id,omitempty"` -// } -// -// client.BackupS3Config.Query(). -// Select(backups3config.FieldProfileID). -// Scan(ctx, &v) -func (_q *BackupS3ConfigQuery) Select(fields ...string) *BackupS3ConfigSelect { - _q.ctx.Fields = append(_q.ctx.Fields, fields...) - sbuild := &BackupS3ConfigSelect{BackupS3ConfigQuery: _q} - sbuild.label = backups3config.Label - sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan - return sbuild -} - -// Aggregate returns a BackupS3ConfigSelect configured with the given aggregations. 
-func (_q *BackupS3ConfigQuery) Aggregate(fns ...AggregateFunc) *BackupS3ConfigSelect { - return _q.Select().Aggregate(fns...) -} - -func (_q *BackupS3ConfigQuery) prepareQuery(ctx context.Context) error { - for _, inter := range _q.inters { - if inter == nil { - return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") - } - if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, _q); err != nil { - return err - } - } - } - for _, f := range _q.ctx.Fields { - if !backups3config.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} - } - } - if _q.path != nil { - prev, err := _q.path(ctx) - if err != nil { - return err - } - _q.sql = prev - } - return nil -} - -func (_q *BackupS3ConfigQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BackupS3Config, error) { - var ( - nodes = []*BackupS3Config{} - _spec = _q.querySpec() - ) - _spec.ScanValues = func(columns []string) ([]any, error) { - return (*BackupS3Config).scanValues(nil, columns) - } - _spec.Assign = func(columns []string, values []any) error { - node := &BackupS3Config{config: _q.config} - nodes = append(nodes, node) - return node.assignValues(columns, values) - } - for i := range hooks { - hooks[i](ctx, _spec) - } - if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { - return nil, err - } - if len(nodes) == 0 { - return nodes, nil - } - return nodes, nil -} - -func (_q *BackupS3ConfigQuery) sqlCount(ctx context.Context) (int, error) { - _spec := _q.querySpec() - _spec.Node.Columns = _q.ctx.Fields - if len(_q.ctx.Fields) > 0 { - _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique - } - return sqlgraph.CountNodes(ctx, _q.driver, _spec) -} - -func (_q *BackupS3ConfigQuery) querySpec() *sqlgraph.QuerySpec { - _spec := sqlgraph.NewQuerySpec(backups3config.Table, backups3config.Columns, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) - _spec.From = _q.sql - if unique := _q.ctx.Unique; 
unique != nil { - _spec.Unique = *unique - } else if _q.path != nil { - _spec.Unique = true - } - if fields := _q.ctx.Fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, backups3config.FieldID) - for i := range fields { - if fields[i] != backups3config.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) - } - } - } - if ps := _q.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if limit := _q.ctx.Limit; limit != nil { - _spec.Limit = *limit - } - if offset := _q.ctx.Offset; offset != nil { - _spec.Offset = *offset - } - if ps := _q.order; len(ps) > 0 { - _spec.Order = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - return _spec -} - -func (_q *BackupS3ConfigQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(_q.driver.Dialect()) - t1 := builder.Table(backups3config.Table) - columns := _q.ctx.Fields - if len(columns) == 0 { - columns = backups3config.Columns - } - selector := builder.Select(t1.Columns(columns...)...).From(t1) - if _q.sql != nil { - selector = _q.sql - selector.Select(selector.Columns(columns...)...) - } - if _q.ctx.Unique != nil && *_q.ctx.Unique { - selector.Distinct() - } - for _, p := range _q.predicates { - p(selector) - } - for _, p := range _q.order { - p(selector) - } - if offset := _q.ctx.Offset; offset != nil { - // limit is mandatory for offset clause. We start - // with default value, and override it below if needed. - selector.Offset(*offset).Limit(math.MaxInt32) - } - if limit := _q.ctx.Limit; limit != nil { - selector.Limit(*limit) - } - return selector -} - -// BackupS3ConfigGroupBy is the group-by builder for BackupS3Config entities. -type BackupS3ConfigGroupBy struct { - selector - build *BackupS3ConfigQuery -} - -// Aggregate adds the given aggregation functions to the group-by query. 
-func (_g *BackupS3ConfigGroupBy) Aggregate(fns ...AggregateFunc) *BackupS3ConfigGroupBy { - _g.fns = append(_g.fns, fns...) - return _g -} - -// Scan applies the selector query and scans the result into the given value. -func (_g *BackupS3ConfigGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) - if err := _g.build.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*BackupS3ConfigQuery, *BackupS3ConfigGroupBy](ctx, _g.build, _g, _g.build.inters, v) -} - -func (_g *BackupS3ConfigGroupBy) sqlScan(ctx context.Context, root *BackupS3ConfigQuery, v any) error { - selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(_g.fns)) - for _, fn := range _g.fns { - aggregation = append(aggregation, fn(selector)) - } - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) - for _, f := range *_g.flds { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - selector.GroupBy(selector.Columns(*_g.flds...)...) - if err := selector.Err(); err != nil { - return err - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} - -// BackupS3ConfigSelect is the builder for selecting fields of BackupS3Config entities. -type BackupS3ConfigSelect struct { - *BackupS3ConfigQuery - selector -} - -// Aggregate adds the given aggregation functions to the selector query. -func (_s *BackupS3ConfigSelect) Aggregate(fns ...AggregateFunc) *BackupS3ConfigSelect { - _s.fns = append(_s.fns, fns...) - return _s -} - -// Scan applies the selector query and scans the result into the given value. 
-func (_s *BackupS3ConfigSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) - if err := _s.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*BackupS3ConfigQuery, *BackupS3ConfigSelect](ctx, _s.BackupS3ConfigQuery, _s, _s.inters, v) -} - -func (_s *BackupS3ConfigSelect) sqlScan(ctx context.Context, root *BackupS3ConfigQuery, v any) error { - selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(_s.fns)) - for _, fn := range _s.fns { - aggregation = append(aggregation, fn(selector)) - } - switch n := len(*_s.selector.flds); { - case n == 0 && len(aggregation) > 0: - selector.Select(aggregation...) - case n != 0 && len(aggregation) > 0: - selector.AppendSelect(aggregation...) - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := _s.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} diff --git a/backup/ent/backups3config_update.go b/backup/ent/backups3config_update.go deleted file mode 100644 index 048e93d03..000000000 --- a/backup/ent/backups3config_update.go +++ /dev/null @@ -1,638 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupS3ConfigUpdate is the builder for updating BackupS3Config entities. -type BackupS3ConfigUpdate struct { - config - hooks []Hook - mutation *BackupS3ConfigMutation -} - -// Where appends a list predicates to the BackupS3ConfigUpdate builder. -func (_u *BackupS3ConfigUpdate) Where(ps ...predicate.BackupS3Config) *BackupS3ConfigUpdate { - _u.mutation.Where(ps...) - return _u -} - -// SetProfileID sets the "profile_id" field. 
-func (_u *BackupS3ConfigUpdate) SetProfileID(v string) *BackupS3ConfigUpdate { - _u.mutation.SetProfileID(v) - return _u -} - -// SetNillableProfileID sets the "profile_id" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableProfileID(v *string) *BackupS3ConfigUpdate { - if v != nil { - _u.SetProfileID(*v) - } - return _u -} - -// SetName sets the "name" field. -func (_u *BackupS3ConfigUpdate) SetName(v string) *BackupS3ConfigUpdate { - _u.mutation.SetName(v) - return _u -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableName(v *string) *BackupS3ConfigUpdate { - if v != nil { - _u.SetName(*v) - } - return _u -} - -// SetIsActive sets the "is_active" field. -func (_u *BackupS3ConfigUpdate) SetIsActive(v bool) *BackupS3ConfigUpdate { - _u.mutation.SetIsActive(v) - return _u -} - -// SetNillableIsActive sets the "is_active" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableIsActive(v *bool) *BackupS3ConfigUpdate { - if v != nil { - _u.SetIsActive(*v) - } - return _u -} - -// SetEnabled sets the "enabled" field. -func (_u *BackupS3ConfigUpdate) SetEnabled(v bool) *BackupS3ConfigUpdate { - _u.mutation.SetEnabled(v) - return _u -} - -// SetNillableEnabled sets the "enabled" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableEnabled(v *bool) *BackupS3ConfigUpdate { - if v != nil { - _u.SetEnabled(*v) - } - return _u -} - -// SetEndpoint sets the "endpoint" field. -func (_u *BackupS3ConfigUpdate) SetEndpoint(v string) *BackupS3ConfigUpdate { - _u.mutation.SetEndpoint(v) - return _u -} - -// SetNillableEndpoint sets the "endpoint" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableEndpoint(v *string) *BackupS3ConfigUpdate { - if v != nil { - _u.SetEndpoint(*v) - } - return _u -} - -// SetRegion sets the "region" field. 
-func (_u *BackupS3ConfigUpdate) SetRegion(v string) *BackupS3ConfigUpdate { - _u.mutation.SetRegion(v) - return _u -} - -// SetNillableRegion sets the "region" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableRegion(v *string) *BackupS3ConfigUpdate { - if v != nil { - _u.SetRegion(*v) - } - return _u -} - -// SetBucket sets the "bucket" field. -func (_u *BackupS3ConfigUpdate) SetBucket(v string) *BackupS3ConfigUpdate { - _u.mutation.SetBucket(v) - return _u -} - -// SetNillableBucket sets the "bucket" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableBucket(v *string) *BackupS3ConfigUpdate { - if v != nil { - _u.SetBucket(*v) - } - return _u -} - -// SetAccessKeyID sets the "access_key_id" field. -func (_u *BackupS3ConfigUpdate) SetAccessKeyID(v string) *BackupS3ConfigUpdate { - _u.mutation.SetAccessKeyID(v) - return _u -} - -// SetNillableAccessKeyID sets the "access_key_id" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableAccessKeyID(v *string) *BackupS3ConfigUpdate { - if v != nil { - _u.SetAccessKeyID(*v) - } - return _u -} - -// SetSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field. -func (_u *BackupS3ConfigUpdate) SetSecretAccessKeyEncrypted(v string) *BackupS3ConfigUpdate { - _u.mutation.SetSecretAccessKeyEncrypted(v) - return _u -} - -// SetNillableSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableSecretAccessKeyEncrypted(v *string) *BackupS3ConfigUpdate { - if v != nil { - _u.SetSecretAccessKeyEncrypted(*v) - } - return _u -} - -// ClearSecretAccessKeyEncrypted clears the value of the "secret_access_key_encrypted" field. -func (_u *BackupS3ConfigUpdate) ClearSecretAccessKeyEncrypted() *BackupS3ConfigUpdate { - _u.mutation.ClearSecretAccessKeyEncrypted() - return _u -} - -// SetPrefix sets the "prefix" field. 
-func (_u *BackupS3ConfigUpdate) SetPrefix(v string) *BackupS3ConfigUpdate { - _u.mutation.SetPrefix(v) - return _u -} - -// SetNillablePrefix sets the "prefix" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillablePrefix(v *string) *BackupS3ConfigUpdate { - if v != nil { - _u.SetPrefix(*v) - } - return _u -} - -// SetForcePathStyle sets the "force_path_style" field. -func (_u *BackupS3ConfigUpdate) SetForcePathStyle(v bool) *BackupS3ConfigUpdate { - _u.mutation.SetForcePathStyle(v) - return _u -} - -// SetNillableForcePathStyle sets the "force_path_style" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableForcePathStyle(v *bool) *BackupS3ConfigUpdate { - if v != nil { - _u.SetForcePathStyle(*v) - } - return _u -} - -// SetUseSsl sets the "use_ssl" field. -func (_u *BackupS3ConfigUpdate) SetUseSsl(v bool) *BackupS3ConfigUpdate { - _u.mutation.SetUseSsl(v) - return _u -} - -// SetNillableUseSsl sets the "use_ssl" field if the given value is not nil. -func (_u *BackupS3ConfigUpdate) SetNillableUseSsl(v *bool) *BackupS3ConfigUpdate { - if v != nil { - _u.SetUseSsl(*v) - } - return _u -} - -// SetUpdatedAt sets the "updated_at" field. -func (_u *BackupS3ConfigUpdate) SetUpdatedAt(v time.Time) *BackupS3ConfigUpdate { - _u.mutation.SetUpdatedAt(v) - return _u -} - -// Mutation returns the BackupS3ConfigMutation object of the builder. -func (_u *BackupS3ConfigUpdate) Mutation() *BackupS3ConfigMutation { - return _u.mutation -} - -// Save executes the query and returns the number of nodes affected by the update operation. -func (_u *BackupS3ConfigUpdate) Save(ctx context.Context) (int, error) { - _u.defaults() - return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (_u *BackupS3ConfigUpdate) SaveX(ctx context.Context) int { - affected, err := _u.Save(ctx) - if err != nil { - panic(err) - } - return affected -} - -// Exec executes the query. 
-func (_u *BackupS3ConfigUpdate) Exec(ctx context.Context) error { - _, err := _u.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_u *BackupS3ConfigUpdate) ExecX(ctx context.Context) { - if err := _u.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_u *BackupS3ConfigUpdate) defaults() { - if _, ok := _u.mutation.UpdatedAt(); !ok { - v := backups3config.UpdateDefaultUpdatedAt() - _u.mutation.SetUpdatedAt(v) - } -} - -func (_u *BackupS3ConfigUpdate) sqlSave(ctx context.Context) (_node int, err error) { - _spec := sqlgraph.NewUpdateSpec(backups3config.Table, backups3config.Columns, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) - if ps := _u.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := _u.mutation.ProfileID(); ok { - _spec.SetField(backups3config.FieldProfileID, field.TypeString, value) - } - if value, ok := _u.mutation.Name(); ok { - _spec.SetField(backups3config.FieldName, field.TypeString, value) - } - if value, ok := _u.mutation.IsActive(); ok { - _spec.SetField(backups3config.FieldIsActive, field.TypeBool, value) - } - if value, ok := _u.mutation.Enabled(); ok { - _spec.SetField(backups3config.FieldEnabled, field.TypeBool, value) - } - if value, ok := _u.mutation.Endpoint(); ok { - _spec.SetField(backups3config.FieldEndpoint, field.TypeString, value) - } - if value, ok := _u.mutation.Region(); ok { - _spec.SetField(backups3config.FieldRegion, field.TypeString, value) - } - if value, ok := _u.mutation.Bucket(); ok { - _spec.SetField(backups3config.FieldBucket, field.TypeString, value) - } - if value, ok := _u.mutation.AccessKeyID(); ok { - _spec.SetField(backups3config.FieldAccessKeyID, field.TypeString, value) - } - if value, ok := _u.mutation.SecretAccessKeyEncrypted(); ok { - 
_spec.SetField(backups3config.FieldSecretAccessKeyEncrypted, field.TypeString, value) - } - if _u.mutation.SecretAccessKeyEncryptedCleared() { - _spec.ClearField(backups3config.FieldSecretAccessKeyEncrypted, field.TypeString) - } - if value, ok := _u.mutation.Prefix(); ok { - _spec.SetField(backups3config.FieldPrefix, field.TypeString, value) - } - if value, ok := _u.mutation.ForcePathStyle(); ok { - _spec.SetField(backups3config.FieldForcePathStyle, field.TypeBool, value) - } - if value, ok := _u.mutation.UseSsl(); ok { - _spec.SetField(backups3config.FieldUseSsl, field.TypeBool, value) - } - if value, ok := _u.mutation.UpdatedAt(); ok { - _spec.SetField(backups3config.FieldUpdatedAt, field.TypeTime, value) - } - if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{backups3config.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return 0, err - } - _u.mutation.done = true - return _node, nil -} - -// BackupS3ConfigUpdateOne is the builder for updating a single BackupS3Config entity. -type BackupS3ConfigUpdateOne struct { - config - fields []string - hooks []Hook - mutation *BackupS3ConfigMutation -} - -// SetProfileID sets the "profile_id" field. -func (_u *BackupS3ConfigUpdateOne) SetProfileID(v string) *BackupS3ConfigUpdateOne { - _u.mutation.SetProfileID(v) - return _u -} - -// SetNillableProfileID sets the "profile_id" field if the given value is not nil. -func (_u *BackupS3ConfigUpdateOne) SetNillableProfileID(v *string) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetProfileID(*v) - } - return _u -} - -// SetName sets the "name" field. -func (_u *BackupS3ConfigUpdateOne) SetName(v string) *BackupS3ConfigUpdateOne { - _u.mutation.SetName(v) - return _u -} - -// SetNillableName sets the "name" field if the given value is not nil. 
-func (_u *BackupS3ConfigUpdateOne) SetNillableName(v *string) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetName(*v) - } - return _u -} - -// SetIsActive sets the "is_active" field. -func (_u *BackupS3ConfigUpdateOne) SetIsActive(v bool) *BackupS3ConfigUpdateOne { - _u.mutation.SetIsActive(v) - return _u -} - -// SetNillableIsActive sets the "is_active" field if the given value is not nil. -func (_u *BackupS3ConfigUpdateOne) SetNillableIsActive(v *bool) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetIsActive(*v) - } - return _u -} - -// SetEnabled sets the "enabled" field. -func (_u *BackupS3ConfigUpdateOne) SetEnabled(v bool) *BackupS3ConfigUpdateOne { - _u.mutation.SetEnabled(v) - return _u -} - -// SetNillableEnabled sets the "enabled" field if the given value is not nil. -func (_u *BackupS3ConfigUpdateOne) SetNillableEnabled(v *bool) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetEnabled(*v) - } - return _u -} - -// SetEndpoint sets the "endpoint" field. -func (_u *BackupS3ConfigUpdateOne) SetEndpoint(v string) *BackupS3ConfigUpdateOne { - _u.mutation.SetEndpoint(v) - return _u -} - -// SetNillableEndpoint sets the "endpoint" field if the given value is not nil. -func (_u *BackupS3ConfigUpdateOne) SetNillableEndpoint(v *string) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetEndpoint(*v) - } - return _u -} - -// SetRegion sets the "region" field. -func (_u *BackupS3ConfigUpdateOne) SetRegion(v string) *BackupS3ConfigUpdateOne { - _u.mutation.SetRegion(v) - return _u -} - -// SetNillableRegion sets the "region" field if the given value is not nil. -func (_u *BackupS3ConfigUpdateOne) SetNillableRegion(v *string) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetRegion(*v) - } - return _u -} - -// SetBucket sets the "bucket" field. -func (_u *BackupS3ConfigUpdateOne) SetBucket(v string) *BackupS3ConfigUpdateOne { - _u.mutation.SetBucket(v) - return _u -} - -// SetNillableBucket sets the "bucket" field if the given value is not nil. 
-func (_u *BackupS3ConfigUpdateOne) SetNillableBucket(v *string) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetBucket(*v) - } - return _u -} - -// SetAccessKeyID sets the "access_key_id" field. -func (_u *BackupS3ConfigUpdateOne) SetAccessKeyID(v string) *BackupS3ConfigUpdateOne { - _u.mutation.SetAccessKeyID(v) - return _u -} - -// SetNillableAccessKeyID sets the "access_key_id" field if the given value is not nil. -func (_u *BackupS3ConfigUpdateOne) SetNillableAccessKeyID(v *string) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetAccessKeyID(*v) - } - return _u -} - -// SetSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field. -func (_u *BackupS3ConfigUpdateOne) SetSecretAccessKeyEncrypted(v string) *BackupS3ConfigUpdateOne { - _u.mutation.SetSecretAccessKeyEncrypted(v) - return _u -} - -// SetNillableSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field if the given value is not nil. -func (_u *BackupS3ConfigUpdateOne) SetNillableSecretAccessKeyEncrypted(v *string) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetSecretAccessKeyEncrypted(*v) - } - return _u -} - -// ClearSecretAccessKeyEncrypted clears the value of the "secret_access_key_encrypted" field. -func (_u *BackupS3ConfigUpdateOne) ClearSecretAccessKeyEncrypted() *BackupS3ConfigUpdateOne { - _u.mutation.ClearSecretAccessKeyEncrypted() - return _u -} - -// SetPrefix sets the "prefix" field. -func (_u *BackupS3ConfigUpdateOne) SetPrefix(v string) *BackupS3ConfigUpdateOne { - _u.mutation.SetPrefix(v) - return _u -} - -// SetNillablePrefix sets the "prefix" field if the given value is not nil. -func (_u *BackupS3ConfigUpdateOne) SetNillablePrefix(v *string) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetPrefix(*v) - } - return _u -} - -// SetForcePathStyle sets the "force_path_style" field. 
-func (_u *BackupS3ConfigUpdateOne) SetForcePathStyle(v bool) *BackupS3ConfigUpdateOne { - _u.mutation.SetForcePathStyle(v) - return _u -} - -// SetNillableForcePathStyle sets the "force_path_style" field if the given value is not nil. -func (_u *BackupS3ConfigUpdateOne) SetNillableForcePathStyle(v *bool) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetForcePathStyle(*v) - } - return _u -} - -// SetUseSsl sets the "use_ssl" field. -func (_u *BackupS3ConfigUpdateOne) SetUseSsl(v bool) *BackupS3ConfigUpdateOne { - _u.mutation.SetUseSsl(v) - return _u -} - -// SetNillableUseSsl sets the "use_ssl" field if the given value is not nil. -func (_u *BackupS3ConfigUpdateOne) SetNillableUseSsl(v *bool) *BackupS3ConfigUpdateOne { - if v != nil { - _u.SetUseSsl(*v) - } - return _u -} - -// SetUpdatedAt sets the "updated_at" field. -func (_u *BackupS3ConfigUpdateOne) SetUpdatedAt(v time.Time) *BackupS3ConfigUpdateOne { - _u.mutation.SetUpdatedAt(v) - return _u -} - -// Mutation returns the BackupS3ConfigMutation object of the builder. -func (_u *BackupS3ConfigUpdateOne) Mutation() *BackupS3ConfigMutation { - return _u.mutation -} - -// Where appends a list predicates to the BackupS3ConfigUpdate builder. -func (_u *BackupS3ConfigUpdateOne) Where(ps ...predicate.BackupS3Config) *BackupS3ConfigUpdateOne { - _u.mutation.Where(ps...) - return _u -} - -// Select allows selecting one or more fields (columns) of the returned entity. -// The default is selecting all fields defined in the entity schema. -func (_u *BackupS3ConfigUpdateOne) Select(field string, fields ...string) *BackupS3ConfigUpdateOne { - _u.fields = append([]string{field}, fields...) - return _u -} - -// Save executes the query and returns the updated BackupS3Config entity. -func (_u *BackupS3ConfigUpdateOne) Save(ctx context.Context) (*BackupS3Config, error) { - _u.defaults() - return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) -} - -// SaveX is like Save, but panics if an error occurs. 
-func (_u *BackupS3ConfigUpdateOne) SaveX(ctx context.Context) *BackupS3Config { - node, err := _u.Save(ctx) - if err != nil { - panic(err) - } - return node -} - -// Exec executes the query on the entity. -func (_u *BackupS3ConfigUpdateOne) Exec(ctx context.Context) error { - _, err := _u.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_u *BackupS3ConfigUpdateOne) ExecX(ctx context.Context) { - if err := _u.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_u *BackupS3ConfigUpdateOne) defaults() { - if _, ok := _u.mutation.UpdatedAt(); !ok { - v := backups3config.UpdateDefaultUpdatedAt() - _u.mutation.SetUpdatedAt(v) - } -} - -func (_u *BackupS3ConfigUpdateOne) sqlSave(ctx context.Context) (_node *BackupS3Config, err error) { - _spec := sqlgraph.NewUpdateSpec(backups3config.Table, backups3config.Columns, sqlgraph.NewFieldSpec(backups3config.FieldID, field.TypeInt)) - id, ok := _u.mutation.ID() - if !ok { - return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BackupS3Config.id" for update`)} - } - _spec.Node.ID.Value = id - if fields := _u.fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, backups3config.FieldID) - for _, f := range fields { - if !backups3config.ValidColumn(f) { - return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} - } - if f != backups3config.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, f) - } - } - } - if ps := _u.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := _u.mutation.ProfileID(); ok { - _spec.SetField(backups3config.FieldProfileID, field.TypeString, value) - } - if value, ok := _u.mutation.Name(); ok { - _spec.SetField(backups3config.FieldName, field.TypeString, value) - } - 
if value, ok := _u.mutation.IsActive(); ok { - _spec.SetField(backups3config.FieldIsActive, field.TypeBool, value) - } - if value, ok := _u.mutation.Enabled(); ok { - _spec.SetField(backups3config.FieldEnabled, field.TypeBool, value) - } - if value, ok := _u.mutation.Endpoint(); ok { - _spec.SetField(backups3config.FieldEndpoint, field.TypeString, value) - } - if value, ok := _u.mutation.Region(); ok { - _spec.SetField(backups3config.FieldRegion, field.TypeString, value) - } - if value, ok := _u.mutation.Bucket(); ok { - _spec.SetField(backups3config.FieldBucket, field.TypeString, value) - } - if value, ok := _u.mutation.AccessKeyID(); ok { - _spec.SetField(backups3config.FieldAccessKeyID, field.TypeString, value) - } - if value, ok := _u.mutation.SecretAccessKeyEncrypted(); ok { - _spec.SetField(backups3config.FieldSecretAccessKeyEncrypted, field.TypeString, value) - } - if _u.mutation.SecretAccessKeyEncryptedCleared() { - _spec.ClearField(backups3config.FieldSecretAccessKeyEncrypted, field.TypeString) - } - if value, ok := _u.mutation.Prefix(); ok { - _spec.SetField(backups3config.FieldPrefix, field.TypeString, value) - } - if value, ok := _u.mutation.ForcePathStyle(); ok { - _spec.SetField(backups3config.FieldForcePathStyle, field.TypeBool, value) - } - if value, ok := _u.mutation.UseSsl(); ok { - _spec.SetField(backups3config.FieldUseSsl, field.TypeBool, value) - } - if value, ok := _u.mutation.UpdatedAt(); ok { - _spec.SetField(backups3config.FieldUpdatedAt, field.TypeTime, value) - } - _node = &BackupS3Config{config: _u.config} - _spec.Assign = _node.assignValues - _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{backups3config.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - _u.mutation.done = true - return _node, nil -} diff --git 
a/backup/ent/backupsetting.go b/backup/ent/backupsetting.go deleted file mode 100644 index 8853d6529..000000000 --- a/backup/ent/backupsetting.go +++ /dev/null @@ -1,172 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "fmt" - "strings" - "time" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" -) - -// BackupSetting is the model entity for the BackupSetting schema. -type BackupSetting struct { - config `json:"-"` - // ID of the ent. - ID int `json:"id,omitempty"` - // SourceMode holds the value of the "source_mode" field. - SourceMode backupsetting.SourceMode `json:"source_mode,omitempty"` - // BackupRoot holds the value of the "backup_root" field. - BackupRoot string `json:"backup_root,omitempty"` - // RetentionDays holds the value of the "retention_days" field. - RetentionDays int `json:"retention_days,omitempty"` - // KeepLast holds the value of the "keep_last" field. - KeepLast int `json:"keep_last,omitempty"` - // SqlitePath holds the value of the "sqlite_path" field. - SqlitePath string `json:"sqlite_path,omitempty"` - // CreatedAt holds the value of the "created_at" field. - CreatedAt time.Time `json:"created_at,omitempty"` - // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt time.Time `json:"updated_at,omitempty"` - selectValues sql.SelectValues -} - -// scanValues returns the types for scanning values from sql.Rows. 
-func (*BackupSetting) scanValues(columns []string) ([]any, error) { - values := make([]any, len(columns)) - for i := range columns { - switch columns[i] { - case backupsetting.FieldID, backupsetting.FieldRetentionDays, backupsetting.FieldKeepLast: - values[i] = new(sql.NullInt64) - case backupsetting.FieldSourceMode, backupsetting.FieldBackupRoot, backupsetting.FieldSqlitePath: - values[i] = new(sql.NullString) - case backupsetting.FieldCreatedAt, backupsetting.FieldUpdatedAt: - values[i] = new(sql.NullTime) - default: - values[i] = new(sql.UnknownType) - } - } - return values, nil -} - -// assignValues assigns the values that were returned from sql.Rows (after scanning) -// to the BackupSetting fields. -func (_m *BackupSetting) assignValues(columns []string, values []any) error { - if m, n := len(values), len(columns); m < n { - return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) - } - for i := range columns { - switch columns[i] { - case backupsetting.FieldID: - value, ok := values[i].(*sql.NullInt64) - if !ok { - return fmt.Errorf("unexpected type %T for field id", value) - } - _m.ID = int(value.Int64) - case backupsetting.FieldSourceMode: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field source_mode", values[i]) - } else if value.Valid { - _m.SourceMode = backupsetting.SourceMode(value.String) - } - case backupsetting.FieldBackupRoot: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field backup_root", values[i]) - } else if value.Valid { - _m.BackupRoot = value.String - } - case backupsetting.FieldRetentionDays: - if value, ok := values[i].(*sql.NullInt64); !ok { - return fmt.Errorf("unexpected type %T for field retention_days", values[i]) - } else if value.Valid { - _m.RetentionDays = int(value.Int64) - } - case backupsetting.FieldKeepLast: - if value, ok := values[i].(*sql.NullInt64); !ok { - return fmt.Errorf("unexpected type %T for field 
keep_last", values[i]) - } else if value.Valid { - _m.KeepLast = int(value.Int64) - } - case backupsetting.FieldSqlitePath: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field sqlite_path", values[i]) - } else if value.Valid { - _m.SqlitePath = value.String - } - case backupsetting.FieldCreatedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field created_at", values[i]) - } else if value.Valid { - _m.CreatedAt = value.Time - } - case backupsetting.FieldUpdatedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field updated_at", values[i]) - } else if value.Valid { - _m.UpdatedAt = value.Time - } - default: - _m.selectValues.Set(columns[i], values[i]) - } - } - return nil -} - -// Value returns the ent.Value that was dynamically selected and assigned to the BackupSetting. -// This includes values selected through modifiers, order, etc. -func (_m *BackupSetting) Value(name string) (ent.Value, error) { - return _m.selectValues.Get(name) -} - -// Update returns a builder for updating this BackupSetting. -// Note that you need to call BackupSetting.Unwrap() before calling this method if this BackupSetting -// was returned from a transaction, and the transaction was committed or rolled back. -func (_m *BackupSetting) Update() *BackupSettingUpdateOne { - return NewBackupSettingClient(_m.config).UpdateOne(_m) -} - -// Unwrap unwraps the BackupSetting entity that was returned from a transaction after it was closed, -// so that all future queries will be executed through the driver which created the transaction. -func (_m *BackupSetting) Unwrap() *BackupSetting { - _tx, ok := _m.config.driver.(*txDriver) - if !ok { - panic("ent: BackupSetting is not a transactional entity") - } - _m.config.driver = _tx.drv - return _m -} - -// String implements the fmt.Stringer. 
-func (_m *BackupSetting) String() string { - var builder strings.Builder - builder.WriteString("BackupSetting(") - builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) - builder.WriteString("source_mode=") - builder.WriteString(fmt.Sprintf("%v", _m.SourceMode)) - builder.WriteString(", ") - builder.WriteString("backup_root=") - builder.WriteString(_m.BackupRoot) - builder.WriteString(", ") - builder.WriteString("retention_days=") - builder.WriteString(fmt.Sprintf("%v", _m.RetentionDays)) - builder.WriteString(", ") - builder.WriteString("keep_last=") - builder.WriteString(fmt.Sprintf("%v", _m.KeepLast)) - builder.WriteString(", ") - builder.WriteString("sqlite_path=") - builder.WriteString(_m.SqlitePath) - builder.WriteString(", ") - builder.WriteString("created_at=") - builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) - builder.WriteString(", ") - builder.WriteString("updated_at=") - builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) - builder.WriteByte(')') - return builder.String() -} - -// BackupSettings is a parsable slice of BackupSetting. -type BackupSettings []*BackupSetting diff --git a/backup/ent/backupsetting/backupsetting.go b/backup/ent/backupsetting/backupsetting.go deleted file mode 100644 index 5dc076825..000000000 --- a/backup/ent/backupsetting/backupsetting.go +++ /dev/null @@ -1,141 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package backupsetting - -import ( - "fmt" - "time" - - "entgo.io/ent/dialect/sql" -) - -const ( - // Label holds the string label denoting the backupsetting type in the database. - Label = "backup_setting" - // FieldID holds the string denoting the id field in the database. - FieldID = "id" - // FieldSourceMode holds the string denoting the source_mode field in the database. - FieldSourceMode = "source_mode" - // FieldBackupRoot holds the string denoting the backup_root field in the database. 
- FieldBackupRoot = "backup_root" - // FieldRetentionDays holds the string denoting the retention_days field in the database. - FieldRetentionDays = "retention_days" - // FieldKeepLast holds the string denoting the keep_last field in the database. - FieldKeepLast = "keep_last" - // FieldSqlitePath holds the string denoting the sqlite_path field in the database. - FieldSqlitePath = "sqlite_path" - // FieldCreatedAt holds the string denoting the created_at field in the database. - FieldCreatedAt = "created_at" - // FieldUpdatedAt holds the string denoting the updated_at field in the database. - FieldUpdatedAt = "updated_at" - // Table holds the table name of the backupsetting in the database. - Table = "backup_settings" -) - -// Columns holds all SQL columns for backupsetting fields. -var Columns = []string{ - FieldID, - FieldSourceMode, - FieldBackupRoot, - FieldRetentionDays, - FieldKeepLast, - FieldSqlitePath, - FieldCreatedAt, - FieldUpdatedAt, -} - -// ValidColumn reports if the column name is valid (part of the table columns). -func ValidColumn(column string) bool { - for i := range Columns { - if column == Columns[i] { - return true - } - } - return false -} - -var ( - // DefaultBackupRoot holds the default value on creation for the "backup_root" field. - DefaultBackupRoot string - // DefaultRetentionDays holds the default value on creation for the "retention_days" field. - DefaultRetentionDays int - // DefaultKeepLast holds the default value on creation for the "keep_last" field. - DefaultKeepLast int - // DefaultSqlitePath holds the default value on creation for the "sqlite_path" field. - DefaultSqlitePath string - // DefaultCreatedAt holds the default value on creation for the "created_at" field. - DefaultCreatedAt func() time.Time - // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. - DefaultUpdatedAt func() time.Time - // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. 
- UpdateDefaultUpdatedAt func() time.Time -) - -// SourceMode defines the type for the "source_mode" enum field. -type SourceMode string - -// SourceModeDirect is the default value of the SourceMode enum. -const DefaultSourceMode = SourceModeDirect - -// SourceMode values. -const ( - SourceModeDirect SourceMode = "direct" - SourceModeDockerExec SourceMode = "docker_exec" -) - -func (sm SourceMode) String() string { - return string(sm) -} - -// SourceModeValidator is a validator for the "source_mode" field enum values. It is called by the builders before save. -func SourceModeValidator(sm SourceMode) error { - switch sm { - case SourceModeDirect, SourceModeDockerExec: - return nil - default: - return fmt.Errorf("backupsetting: invalid enum value for source_mode field: %q", sm) - } -} - -// OrderOption defines the ordering options for the BackupSetting queries. -type OrderOption func(*sql.Selector) - -// ByID orders the results by the id field. -func ByID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldID, opts...).ToFunc() -} - -// BySourceMode orders the results by the source_mode field. -func BySourceMode(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldSourceMode, opts...).ToFunc() -} - -// ByBackupRoot orders the results by the backup_root field. -func ByBackupRoot(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldBackupRoot, opts...).ToFunc() -} - -// ByRetentionDays orders the results by the retention_days field. -func ByRetentionDays(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldRetentionDays, opts...).ToFunc() -} - -// ByKeepLast orders the results by the keep_last field. -func ByKeepLast(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldKeepLast, opts...).ToFunc() -} - -// BySqlitePath orders the results by the sqlite_path field. 
-func BySqlitePath(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldSqlitePath, opts...).ToFunc() -} - -// ByCreatedAt orders the results by the created_at field. -func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() -} - -// ByUpdatedAt orders the results by the updated_at field. -func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() -} diff --git a/backup/ent/backupsetting/where.go b/backup/ent/backupsetting/where.go deleted file mode 100644 index 29d5a860b..000000000 --- a/backup/ent/backupsetting/where.go +++ /dev/null @@ -1,410 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package backupsetting - -import ( - "time" - - "entgo.io/ent/dialect/sql" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// ID filters vertices based on their ID field. -func ID(id int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldID, id)) -} - -// IDEQ applies the EQ predicate on the ID field. -func IDEQ(id int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldID, id)) -} - -// IDNEQ applies the NEQ predicate on the ID field. -func IDNEQ(id int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNEQ(FieldID, id)) -} - -// IDIn applies the In predicate on the ID field. -func IDIn(ids ...int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldIn(FieldID, ids...)) -} - -// IDNotIn applies the NotIn predicate on the ID field. -func IDNotIn(ids ...int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNotIn(FieldID, ids...)) -} - -// IDGT applies the GT predicate on the ID field. -func IDGT(id int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGT(FieldID, id)) -} - -// IDGTE applies the GTE predicate on the ID field. 
-func IDGTE(id int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGTE(FieldID, id)) -} - -// IDLT applies the LT predicate on the ID field. -func IDLT(id int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLT(FieldID, id)) -} - -// IDLTE applies the LTE predicate on the ID field. -func IDLTE(id int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLTE(FieldID, id)) -} - -// BackupRoot applies equality check predicate on the "backup_root" field. It's identical to BackupRootEQ. -func BackupRoot(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldBackupRoot, v)) -} - -// RetentionDays applies equality check predicate on the "retention_days" field. It's identical to RetentionDaysEQ. -func RetentionDays(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldRetentionDays, v)) -} - -// KeepLast applies equality check predicate on the "keep_last" field. It's identical to KeepLastEQ. -func KeepLast(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldKeepLast, v)) -} - -// SqlitePath applies equality check predicate on the "sqlite_path" field. It's identical to SqlitePathEQ. -func SqlitePath(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldSqlitePath, v)) -} - -// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. -func CreatedAt(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldCreatedAt, v)) -} - -// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. -func UpdatedAt(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldUpdatedAt, v)) -} - -// SourceModeEQ applies the EQ predicate on the "source_mode" field. 
-func SourceModeEQ(v SourceMode) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldSourceMode, v)) -} - -// SourceModeNEQ applies the NEQ predicate on the "source_mode" field. -func SourceModeNEQ(v SourceMode) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNEQ(FieldSourceMode, v)) -} - -// SourceModeIn applies the In predicate on the "source_mode" field. -func SourceModeIn(vs ...SourceMode) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldIn(FieldSourceMode, vs...)) -} - -// SourceModeNotIn applies the NotIn predicate on the "source_mode" field. -func SourceModeNotIn(vs ...SourceMode) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNotIn(FieldSourceMode, vs...)) -} - -// BackupRootEQ applies the EQ predicate on the "backup_root" field. -func BackupRootEQ(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldBackupRoot, v)) -} - -// BackupRootNEQ applies the NEQ predicate on the "backup_root" field. -func BackupRootNEQ(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNEQ(FieldBackupRoot, v)) -} - -// BackupRootIn applies the In predicate on the "backup_root" field. -func BackupRootIn(vs ...string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldIn(FieldBackupRoot, vs...)) -} - -// BackupRootNotIn applies the NotIn predicate on the "backup_root" field. -func BackupRootNotIn(vs ...string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNotIn(FieldBackupRoot, vs...)) -} - -// BackupRootGT applies the GT predicate on the "backup_root" field. -func BackupRootGT(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGT(FieldBackupRoot, v)) -} - -// BackupRootGTE applies the GTE predicate on the "backup_root" field. 
-func BackupRootGTE(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGTE(FieldBackupRoot, v)) -} - -// BackupRootLT applies the LT predicate on the "backup_root" field. -func BackupRootLT(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLT(FieldBackupRoot, v)) -} - -// BackupRootLTE applies the LTE predicate on the "backup_root" field. -func BackupRootLTE(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLTE(FieldBackupRoot, v)) -} - -// BackupRootContains applies the Contains predicate on the "backup_root" field. -func BackupRootContains(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldContains(FieldBackupRoot, v)) -} - -// BackupRootHasPrefix applies the HasPrefix predicate on the "backup_root" field. -func BackupRootHasPrefix(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldHasPrefix(FieldBackupRoot, v)) -} - -// BackupRootHasSuffix applies the HasSuffix predicate on the "backup_root" field. -func BackupRootHasSuffix(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldHasSuffix(FieldBackupRoot, v)) -} - -// BackupRootEqualFold applies the EqualFold predicate on the "backup_root" field. -func BackupRootEqualFold(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEqualFold(FieldBackupRoot, v)) -} - -// BackupRootContainsFold applies the ContainsFold predicate on the "backup_root" field. -func BackupRootContainsFold(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldContainsFold(FieldBackupRoot, v)) -} - -// RetentionDaysEQ applies the EQ predicate on the "retention_days" field. -func RetentionDaysEQ(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldRetentionDays, v)) -} - -// RetentionDaysNEQ applies the NEQ predicate on the "retention_days" field. 
-func RetentionDaysNEQ(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNEQ(FieldRetentionDays, v)) -} - -// RetentionDaysIn applies the In predicate on the "retention_days" field. -func RetentionDaysIn(vs ...int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldIn(FieldRetentionDays, vs...)) -} - -// RetentionDaysNotIn applies the NotIn predicate on the "retention_days" field. -func RetentionDaysNotIn(vs ...int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNotIn(FieldRetentionDays, vs...)) -} - -// RetentionDaysGT applies the GT predicate on the "retention_days" field. -func RetentionDaysGT(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGT(FieldRetentionDays, v)) -} - -// RetentionDaysGTE applies the GTE predicate on the "retention_days" field. -func RetentionDaysGTE(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGTE(FieldRetentionDays, v)) -} - -// RetentionDaysLT applies the LT predicate on the "retention_days" field. -func RetentionDaysLT(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLT(FieldRetentionDays, v)) -} - -// RetentionDaysLTE applies the LTE predicate on the "retention_days" field. -func RetentionDaysLTE(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLTE(FieldRetentionDays, v)) -} - -// KeepLastEQ applies the EQ predicate on the "keep_last" field. -func KeepLastEQ(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldKeepLast, v)) -} - -// KeepLastNEQ applies the NEQ predicate on the "keep_last" field. -func KeepLastNEQ(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNEQ(FieldKeepLast, v)) -} - -// KeepLastIn applies the In predicate on the "keep_last" field. 
-func KeepLastIn(vs ...int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldIn(FieldKeepLast, vs...)) -} - -// KeepLastNotIn applies the NotIn predicate on the "keep_last" field. -func KeepLastNotIn(vs ...int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNotIn(FieldKeepLast, vs...)) -} - -// KeepLastGT applies the GT predicate on the "keep_last" field. -func KeepLastGT(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGT(FieldKeepLast, v)) -} - -// KeepLastGTE applies the GTE predicate on the "keep_last" field. -func KeepLastGTE(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGTE(FieldKeepLast, v)) -} - -// KeepLastLT applies the LT predicate on the "keep_last" field. -func KeepLastLT(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLT(FieldKeepLast, v)) -} - -// KeepLastLTE applies the LTE predicate on the "keep_last" field. -func KeepLastLTE(v int) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLTE(FieldKeepLast, v)) -} - -// SqlitePathEQ applies the EQ predicate on the "sqlite_path" field. -func SqlitePathEQ(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldSqlitePath, v)) -} - -// SqlitePathNEQ applies the NEQ predicate on the "sqlite_path" field. -func SqlitePathNEQ(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNEQ(FieldSqlitePath, v)) -} - -// SqlitePathIn applies the In predicate on the "sqlite_path" field. -func SqlitePathIn(vs ...string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldIn(FieldSqlitePath, vs...)) -} - -// SqlitePathNotIn applies the NotIn predicate on the "sqlite_path" field. -func SqlitePathNotIn(vs ...string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNotIn(FieldSqlitePath, vs...)) -} - -// SqlitePathGT applies the GT predicate on the "sqlite_path" field. 
-func SqlitePathGT(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGT(FieldSqlitePath, v)) -} - -// SqlitePathGTE applies the GTE predicate on the "sqlite_path" field. -func SqlitePathGTE(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGTE(FieldSqlitePath, v)) -} - -// SqlitePathLT applies the LT predicate on the "sqlite_path" field. -func SqlitePathLT(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLT(FieldSqlitePath, v)) -} - -// SqlitePathLTE applies the LTE predicate on the "sqlite_path" field. -func SqlitePathLTE(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLTE(FieldSqlitePath, v)) -} - -// SqlitePathContains applies the Contains predicate on the "sqlite_path" field. -func SqlitePathContains(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldContains(FieldSqlitePath, v)) -} - -// SqlitePathHasPrefix applies the HasPrefix predicate on the "sqlite_path" field. -func SqlitePathHasPrefix(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldHasPrefix(FieldSqlitePath, v)) -} - -// SqlitePathHasSuffix applies the HasSuffix predicate on the "sqlite_path" field. -func SqlitePathHasSuffix(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldHasSuffix(FieldSqlitePath, v)) -} - -// SqlitePathEqualFold applies the EqualFold predicate on the "sqlite_path" field. -func SqlitePathEqualFold(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEqualFold(FieldSqlitePath, v)) -} - -// SqlitePathContainsFold applies the ContainsFold predicate on the "sqlite_path" field. -func SqlitePathContainsFold(v string) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldContainsFold(FieldSqlitePath, v)) -} - -// CreatedAtEQ applies the EQ predicate on the "created_at" field. 
-func CreatedAtEQ(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldCreatedAt, v)) -} - -// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. -func CreatedAtNEQ(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNEQ(FieldCreatedAt, v)) -} - -// CreatedAtIn applies the In predicate on the "created_at" field. -func CreatedAtIn(vs ...time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldIn(FieldCreatedAt, vs...)) -} - -// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. -func CreatedAtNotIn(vs ...time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNotIn(FieldCreatedAt, vs...)) -} - -// CreatedAtGT applies the GT predicate on the "created_at" field. -func CreatedAtGT(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGT(FieldCreatedAt, v)) -} - -// CreatedAtGTE applies the GTE predicate on the "created_at" field. -func CreatedAtGTE(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGTE(FieldCreatedAt, v)) -} - -// CreatedAtLT applies the LT predicate on the "created_at" field. -func CreatedAtLT(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLT(FieldCreatedAt, v)) -} - -// CreatedAtLTE applies the LTE predicate on the "created_at" field. -func CreatedAtLTE(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLTE(FieldCreatedAt, v)) -} - -// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. -func UpdatedAtEQ(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldEQ(FieldUpdatedAt, v)) -} - -// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. -func UpdatedAtNEQ(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNEQ(FieldUpdatedAt, v)) -} - -// UpdatedAtIn applies the In predicate on the "updated_at" field. 
-func UpdatedAtIn(vs ...time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldIn(FieldUpdatedAt, vs...)) -} - -// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. -func UpdatedAtNotIn(vs ...time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldNotIn(FieldUpdatedAt, vs...)) -} - -// UpdatedAtGT applies the GT predicate on the "updated_at" field. -func UpdatedAtGT(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGT(FieldUpdatedAt, v)) -} - -// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. -func UpdatedAtGTE(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldGTE(FieldUpdatedAt, v)) -} - -// UpdatedAtLT applies the LT predicate on the "updated_at" field. -func UpdatedAtLT(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLT(FieldUpdatedAt, v)) -} - -// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. -func UpdatedAtLTE(v time.Time) predicate.BackupSetting { - return predicate.BackupSetting(sql.FieldLTE(FieldUpdatedAt, v)) -} - -// And groups predicates with the AND operator between them. -func And(predicates ...predicate.BackupSetting) predicate.BackupSetting { - return predicate.BackupSetting(sql.AndPredicates(predicates...)) -} - -// Or groups predicates with the OR operator between them. -func Or(predicates ...predicate.BackupSetting) predicate.BackupSetting { - return predicate.BackupSetting(sql.OrPredicates(predicates...)) -} - -// Not applies the not operator on the given predicate. -func Not(p predicate.BackupSetting) predicate.BackupSetting { - return predicate.BackupSetting(sql.NotPredicates(p)) -} diff --git a/backup/ent/backupsetting_create.go b/backup/ent/backupsetting_create.go deleted file mode 100644 index 736b7d5cf..000000000 --- a/backup/ent/backupsetting_create.go +++ /dev/null @@ -1,357 +0,0 @@ -// Code generated by ent, DO NOT EDIT. 
- -package ent - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" -) - -// BackupSettingCreate is the builder for creating a BackupSetting entity. -type BackupSettingCreate struct { - config - mutation *BackupSettingMutation - hooks []Hook -} - -// SetSourceMode sets the "source_mode" field. -func (_c *BackupSettingCreate) SetSourceMode(v backupsetting.SourceMode) *BackupSettingCreate { - _c.mutation.SetSourceMode(v) - return _c -} - -// SetNillableSourceMode sets the "source_mode" field if the given value is not nil. -func (_c *BackupSettingCreate) SetNillableSourceMode(v *backupsetting.SourceMode) *BackupSettingCreate { - if v != nil { - _c.SetSourceMode(*v) - } - return _c -} - -// SetBackupRoot sets the "backup_root" field. -func (_c *BackupSettingCreate) SetBackupRoot(v string) *BackupSettingCreate { - _c.mutation.SetBackupRoot(v) - return _c -} - -// SetNillableBackupRoot sets the "backup_root" field if the given value is not nil. -func (_c *BackupSettingCreate) SetNillableBackupRoot(v *string) *BackupSettingCreate { - if v != nil { - _c.SetBackupRoot(*v) - } - return _c -} - -// SetRetentionDays sets the "retention_days" field. -func (_c *BackupSettingCreate) SetRetentionDays(v int) *BackupSettingCreate { - _c.mutation.SetRetentionDays(v) - return _c -} - -// SetNillableRetentionDays sets the "retention_days" field if the given value is not nil. -func (_c *BackupSettingCreate) SetNillableRetentionDays(v *int) *BackupSettingCreate { - if v != nil { - _c.SetRetentionDays(*v) - } - return _c -} - -// SetKeepLast sets the "keep_last" field. -func (_c *BackupSettingCreate) SetKeepLast(v int) *BackupSettingCreate { - _c.mutation.SetKeepLast(v) - return _c -} - -// SetNillableKeepLast sets the "keep_last" field if the given value is not nil. 
-func (_c *BackupSettingCreate) SetNillableKeepLast(v *int) *BackupSettingCreate { - if v != nil { - _c.SetKeepLast(*v) - } - return _c -} - -// SetSqlitePath sets the "sqlite_path" field. -func (_c *BackupSettingCreate) SetSqlitePath(v string) *BackupSettingCreate { - _c.mutation.SetSqlitePath(v) - return _c -} - -// SetNillableSqlitePath sets the "sqlite_path" field if the given value is not nil. -func (_c *BackupSettingCreate) SetNillableSqlitePath(v *string) *BackupSettingCreate { - if v != nil { - _c.SetSqlitePath(*v) - } - return _c -} - -// SetCreatedAt sets the "created_at" field. -func (_c *BackupSettingCreate) SetCreatedAt(v time.Time) *BackupSettingCreate { - _c.mutation.SetCreatedAt(v) - return _c -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (_c *BackupSettingCreate) SetNillableCreatedAt(v *time.Time) *BackupSettingCreate { - if v != nil { - _c.SetCreatedAt(*v) - } - return _c -} - -// SetUpdatedAt sets the "updated_at" field. -func (_c *BackupSettingCreate) SetUpdatedAt(v time.Time) *BackupSettingCreate { - _c.mutation.SetUpdatedAt(v) - return _c -} - -// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (_c *BackupSettingCreate) SetNillableUpdatedAt(v *time.Time) *BackupSettingCreate { - if v != nil { - _c.SetUpdatedAt(*v) - } - return _c -} - -// Mutation returns the BackupSettingMutation object of the builder. -func (_c *BackupSettingCreate) Mutation() *BackupSettingMutation { - return _c.mutation -} - -// Save creates the BackupSetting in the database. -func (_c *BackupSettingCreate) Save(ctx context.Context) (*BackupSetting, error) { - _c.defaults() - return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) -} - -// SaveX calls Save and panics if Save returns an error. -func (_c *BackupSettingCreate) SaveX(ctx context.Context) *BackupSetting { - v, err := _c.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. 
-func (_c *BackupSettingCreate) Exec(ctx context.Context) error { - _, err := _c.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_c *BackupSettingCreate) ExecX(ctx context.Context) { - if err := _c.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_c *BackupSettingCreate) defaults() { - if _, ok := _c.mutation.SourceMode(); !ok { - v := backupsetting.DefaultSourceMode - _c.mutation.SetSourceMode(v) - } - if _, ok := _c.mutation.BackupRoot(); !ok { - v := backupsetting.DefaultBackupRoot - _c.mutation.SetBackupRoot(v) - } - if _, ok := _c.mutation.RetentionDays(); !ok { - v := backupsetting.DefaultRetentionDays - _c.mutation.SetRetentionDays(v) - } - if _, ok := _c.mutation.KeepLast(); !ok { - v := backupsetting.DefaultKeepLast - _c.mutation.SetKeepLast(v) - } - if _, ok := _c.mutation.SqlitePath(); !ok { - v := backupsetting.DefaultSqlitePath - _c.mutation.SetSqlitePath(v) - } - if _, ok := _c.mutation.CreatedAt(); !ok { - v := backupsetting.DefaultCreatedAt() - _c.mutation.SetCreatedAt(v) - } - if _, ok := _c.mutation.UpdatedAt(); !ok { - v := backupsetting.DefaultUpdatedAt() - _c.mutation.SetUpdatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. 
-func (_c *BackupSettingCreate) check() error { - if _, ok := _c.mutation.SourceMode(); !ok { - return &ValidationError{Name: "source_mode", err: errors.New(`ent: missing required field "BackupSetting.source_mode"`)} - } - if v, ok := _c.mutation.SourceMode(); ok { - if err := backupsetting.SourceModeValidator(v); err != nil { - return &ValidationError{Name: "source_mode", err: fmt.Errorf(`ent: validator failed for field "BackupSetting.source_mode": %w`, err)} - } - } - if _, ok := _c.mutation.BackupRoot(); !ok { - return &ValidationError{Name: "backup_root", err: errors.New(`ent: missing required field "BackupSetting.backup_root"`)} - } - if _, ok := _c.mutation.RetentionDays(); !ok { - return &ValidationError{Name: "retention_days", err: errors.New(`ent: missing required field "BackupSetting.retention_days"`)} - } - if _, ok := _c.mutation.KeepLast(); !ok { - return &ValidationError{Name: "keep_last", err: errors.New(`ent: missing required field "BackupSetting.keep_last"`)} - } - if _, ok := _c.mutation.SqlitePath(); !ok { - return &ValidationError{Name: "sqlite_path", err: errors.New(`ent: missing required field "BackupSetting.sqlite_path"`)} - } - if _, ok := _c.mutation.CreatedAt(); !ok { - return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "BackupSetting.created_at"`)} - } - if _, ok := _c.mutation.UpdatedAt(); !ok { - return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "BackupSetting.updated_at"`)} - } - return nil -} - -func (_c *BackupSettingCreate) sqlSave(ctx context.Context) (*BackupSetting, error) { - if err := _c.check(); err != nil { - return nil, err - } - _node, _spec := _c.createSpec() - if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - id := _spec.ID.Value.(int64) - _node.ID = int(id) - _c.mutation.id = &_node.ID - _c.mutation.done = 
true - return _node, nil -} - -func (_c *BackupSettingCreate) createSpec() (*BackupSetting, *sqlgraph.CreateSpec) { - var ( - _node = &BackupSetting{config: _c.config} - _spec = sqlgraph.NewCreateSpec(backupsetting.Table, sqlgraph.NewFieldSpec(backupsetting.FieldID, field.TypeInt)) - ) - if value, ok := _c.mutation.SourceMode(); ok { - _spec.SetField(backupsetting.FieldSourceMode, field.TypeEnum, value) - _node.SourceMode = value - } - if value, ok := _c.mutation.BackupRoot(); ok { - _spec.SetField(backupsetting.FieldBackupRoot, field.TypeString, value) - _node.BackupRoot = value - } - if value, ok := _c.mutation.RetentionDays(); ok { - _spec.SetField(backupsetting.FieldRetentionDays, field.TypeInt, value) - _node.RetentionDays = value - } - if value, ok := _c.mutation.KeepLast(); ok { - _spec.SetField(backupsetting.FieldKeepLast, field.TypeInt, value) - _node.KeepLast = value - } - if value, ok := _c.mutation.SqlitePath(); ok { - _spec.SetField(backupsetting.FieldSqlitePath, field.TypeString, value) - _node.SqlitePath = value - } - if value, ok := _c.mutation.CreatedAt(); ok { - _spec.SetField(backupsetting.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = value - } - if value, ok := _c.mutation.UpdatedAt(); ok { - _spec.SetField(backupsetting.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = value - } - return _node, _spec -} - -// BackupSettingCreateBulk is the builder for creating many BackupSetting entities in bulk. -type BackupSettingCreateBulk struct { - config - err error - builders []*BackupSettingCreate -} - -// Save creates the BackupSetting entities in the database. 
-func (_c *BackupSettingCreateBulk) Save(ctx context.Context) ([]*BackupSetting, error) { - if _c.err != nil { - return nil, _c.err - } - specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) - nodes := make([]*BackupSetting, len(_c.builders)) - mutators := make([]Mutator, len(_c.builders)) - for i := range _c.builders { - func(i int, root context.Context) { - builder := _c.builders[i] - builder.defaults() - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BackupSettingMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err := builder.check(); err != nil { - return nil, err - } - builder.mutation = mutation - var err error - nodes[i], specs[i] = builder.createSpec() - if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) - } else { - spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - // Invoke the actual operation on the latest mutation in the chain. - if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - } - } - if err != nil { - return nil, err - } - mutation.id = &nodes[i].ID - if specs[i].ID.Value != nil { - id := specs[i].ID.Value.(int64) - nodes[i].ID = int(id) - } - mutation.done = true - return nodes[i], nil - }) - for i := len(builder.hooks) - 1; i >= 0; i-- { - mut = builder.hooks[i](mut) - } - mutators[i] = mut - }(i, ctx) - } - if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { - return nil, err - } - } - return nodes, nil -} - -// SaveX is like Save, but panics if an error occurs. -func (_c *BackupSettingCreateBulk) SaveX(ctx context.Context) []*BackupSetting { - v, err := _c.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. 
-func (_c *BackupSettingCreateBulk) Exec(ctx context.Context) error { - _, err := _c.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_c *BackupSettingCreateBulk) ExecX(ctx context.Context) { - if err := _c.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/backup/ent/backupsetting_delete.go b/backup/ent/backupsetting_delete.go deleted file mode 100644 index c672e6f1c..000000000 --- a/backup/ent/backupsetting_delete.go +++ /dev/null @@ -1,88 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupSettingDelete is the builder for deleting a BackupSetting entity. -type BackupSettingDelete struct { - config - hooks []Hook - mutation *BackupSettingMutation -} - -// Where appends a list predicates to the BackupSettingDelete builder. -func (_d *BackupSettingDelete) Where(ps ...predicate.BackupSetting) *BackupSettingDelete { - _d.mutation.Where(ps...) - return _d -} - -// Exec executes the deletion query and returns how many vertices were deleted. -func (_d *BackupSettingDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) -} - -// ExecX is like Exec, but panics if an error occurs. 
-func (_d *BackupSettingDelete) ExecX(ctx context.Context) int { - n, err := _d.Exec(ctx) - if err != nil { - panic(err) - } - return n -} - -func (_d *BackupSettingDelete) sqlExec(ctx context.Context) (int, error) { - _spec := sqlgraph.NewDeleteSpec(backupsetting.Table, sqlgraph.NewFieldSpec(backupsetting.FieldID, field.TypeInt)) - if ps := _d.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) - if err != nil && sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - _d.mutation.done = true - return affected, err -} - -// BackupSettingDeleteOne is the builder for deleting a single BackupSetting entity. -type BackupSettingDeleteOne struct { - _d *BackupSettingDelete -} - -// Where appends a list predicates to the BackupSettingDelete builder. -func (_d *BackupSettingDeleteOne) Where(ps ...predicate.BackupSetting) *BackupSettingDeleteOne { - _d._d.mutation.Where(ps...) - return _d -} - -// Exec executes the deletion query. -func (_d *BackupSettingDeleteOne) Exec(ctx context.Context) error { - n, err := _d._d.Exec(ctx) - switch { - case err != nil: - return err - case n == 0: - return &NotFoundError{backupsetting.Label} - default: - return nil - } -} - -// ExecX is like Exec, but panics if an error occurs. -func (_d *BackupSettingDeleteOne) ExecX(ctx context.Context) { - if err := _d.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/backup/ent/backupsetting_query.go b/backup/ent/backupsetting_query.go deleted file mode 100644 index 6e4f75249..000000000 --- a/backup/ent/backupsetting_query.go +++ /dev/null @@ -1,527 +0,0 @@ -// Code generated by ent, DO NOT EDIT. 
- -package ent - -import ( - "context" - "fmt" - "math" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupSettingQuery is the builder for querying BackupSetting entities. -type BackupSettingQuery struct { - config - ctx *QueryContext - order []backupsetting.OrderOption - inters []Interceptor - predicates []predicate.BackupSetting - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Where adds a new predicate for the BackupSettingQuery builder. -func (_q *BackupSettingQuery) Where(ps ...predicate.BackupSetting) *BackupSettingQuery { - _q.predicates = append(_q.predicates, ps...) - return _q -} - -// Limit the number of records to be returned by this query. -func (_q *BackupSettingQuery) Limit(limit int) *BackupSettingQuery { - _q.ctx.Limit = &limit - return _q -} - -// Offset to start from. -func (_q *BackupSettingQuery) Offset(offset int) *BackupSettingQuery { - _q.ctx.Offset = &offset - return _q -} - -// Unique configures the query builder to filter duplicate records on query. -// By default, unique is set to true, and can be disabled using this method. -func (_q *BackupSettingQuery) Unique(unique bool) *BackupSettingQuery { - _q.ctx.Unique = &unique - return _q -} - -// Order specifies how the records should be ordered. -func (_q *BackupSettingQuery) Order(o ...backupsetting.OrderOption) *BackupSettingQuery { - _q.order = append(_q.order, o...) - return _q -} - -// First returns the first BackupSetting entity from the query. -// Returns a *NotFoundError when no BackupSetting was found. 
-func (_q *BackupSettingQuery) First(ctx context.Context) (*BackupSetting, error) { - nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) - if err != nil { - return nil, err - } - if len(nodes) == 0 { - return nil, &NotFoundError{backupsetting.Label} - } - return nodes[0], nil -} - -// FirstX is like First, but panics if an error occurs. -func (_q *BackupSettingQuery) FirstX(ctx context.Context) *BackupSetting { - node, err := _q.First(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return node -} - -// FirstID returns the first BackupSetting ID from the query. -// Returns a *NotFoundError when no BackupSetting ID was found. -func (_q *BackupSettingQuery) FirstID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { - return - } - if len(ids) == 0 { - err = &NotFoundError{backupsetting.Label} - return - } - return ids[0], nil -} - -// FirstIDX is like FirstID, but panics if an error occurs. -func (_q *BackupSettingQuery) FirstIDX(ctx context.Context) int { - id, err := _q.FirstID(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return id -} - -// Only returns a single BackupSetting entity found by the query, ensuring it only returns one. -// Returns a *NotSingularError when more than one BackupSetting entity is found. -// Returns a *NotFoundError when no BackupSetting entities are found. -func (_q *BackupSettingQuery) Only(ctx context.Context) (*BackupSetting, error) { - nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) - if err != nil { - return nil, err - } - switch len(nodes) { - case 1: - return nodes[0], nil - case 0: - return nil, &NotFoundError{backupsetting.Label} - default: - return nil, &NotSingularError{backupsetting.Label} - } -} - -// OnlyX is like Only, but panics if an error occurs. 
-func (_q *BackupSettingQuery) OnlyX(ctx context.Context) *BackupSetting { - node, err := _q.Only(ctx) - if err != nil { - panic(err) - } - return node -} - -// OnlyID is like Only, but returns the only BackupSetting ID in the query. -// Returns a *NotSingularError when more than one BackupSetting ID is found. -// Returns a *NotFoundError when no entities are found. -func (_q *BackupSettingQuery) OnlyID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { - return - } - switch len(ids) { - case 1: - id = ids[0] - case 0: - err = &NotFoundError{backupsetting.Label} - default: - err = &NotSingularError{backupsetting.Label} - } - return -} - -// OnlyIDX is like OnlyID, but panics if an error occurs. -func (_q *BackupSettingQuery) OnlyIDX(ctx context.Context) int { - id, err := _q.OnlyID(ctx) - if err != nil { - panic(err) - } - return id -} - -// All executes the query and returns a list of BackupSettings. -func (_q *BackupSettingQuery) All(ctx context.Context) ([]*BackupSetting, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) - if err := _q.prepareQuery(ctx); err != nil { - return nil, err - } - qr := querierAll[[]*BackupSetting, *BackupSettingQuery]() - return withInterceptors[[]*BackupSetting](ctx, _q, qr, _q.inters) -} - -// AllX is like All, but panics if an error occurs. -func (_q *BackupSettingQuery) AllX(ctx context.Context) []*BackupSetting { - nodes, err := _q.All(ctx) - if err != nil { - panic(err) - } - return nodes -} - -// IDs executes the query and returns a list of BackupSetting IDs. 
-func (_q *BackupSettingQuery) IDs(ctx context.Context) (ids []int, err error) { - if _q.ctx.Unique == nil && _q.path != nil { - _q.Unique(true) - } - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) - if err = _q.Select(backupsetting.FieldID).Scan(ctx, &ids); err != nil { - return nil, err - } - return ids, nil -} - -// IDsX is like IDs, but panics if an error occurs. -func (_q *BackupSettingQuery) IDsX(ctx context.Context) []int { - ids, err := _q.IDs(ctx) - if err != nil { - panic(err) - } - return ids -} - -// Count returns the count of the given query. -func (_q *BackupSettingQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) - if err := _q.prepareQuery(ctx); err != nil { - return 0, err - } - return withInterceptors[int](ctx, _q, querierCount[*BackupSettingQuery](), _q.inters) -} - -// CountX is like Count, but panics if an error occurs. -func (_q *BackupSettingQuery) CountX(ctx context.Context) int { - count, err := _q.Count(ctx) - if err != nil { - panic(err) - } - return count -} - -// Exist returns true if the query has elements in the graph. -func (_q *BackupSettingQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) - switch _, err := _q.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - -// ExistX is like Exist, but panics if an error occurs. -func (_q *BackupSettingQuery) ExistX(ctx context.Context) bool { - exist, err := _q.Exist(ctx) - if err != nil { - panic(err) - } - return exist -} - -// Clone returns a duplicate of the BackupSettingQuery builder, including all associated steps. It can be -// used to prepare common query builders and use them differently after the clone is made. 
-func (_q *BackupSettingQuery) Clone() *BackupSettingQuery { - if _q == nil { - return nil - } - return &BackupSettingQuery{ - config: _q.config, - ctx: _q.ctx.Clone(), - order: append([]backupsetting.OrderOption{}, _q.order...), - inters: append([]Interceptor{}, _q.inters...), - predicates: append([]predicate.BackupSetting{}, _q.predicates...), - // clone intermediate query. - sql: _q.sql.Clone(), - path: _q.path, - } -} - -// GroupBy is used to group vertices by one or more fields/columns. -// It is often used with aggregate functions, like: count, max, mean, min, sum. -// -// Example: -// -// var v []struct { -// SourceMode backupsetting.SourceMode `json:"source_mode,omitempty"` -// Count int `json:"count,omitempty"` -// } -// -// client.BackupSetting.Query(). -// GroupBy(backupsetting.FieldSourceMode). -// Aggregate(ent.Count()). -// Scan(ctx, &v) -func (_q *BackupSettingQuery) GroupBy(field string, fields ...string) *BackupSettingGroupBy { - _q.ctx.Fields = append([]string{field}, fields...) - grbuild := &BackupSettingGroupBy{build: _q} - grbuild.flds = &_q.ctx.Fields - grbuild.label = backupsetting.Label - grbuild.scan = grbuild.Scan - return grbuild -} - -// Select allows the selection one or more fields/columns for the given query, -// instead of selecting all fields in the entity. -// -// Example: -// -// var v []struct { -// SourceMode backupsetting.SourceMode `json:"source_mode,omitempty"` -// } -// -// client.BackupSetting.Query(). -// Select(backupsetting.FieldSourceMode). -// Scan(ctx, &v) -func (_q *BackupSettingQuery) Select(fields ...string) *BackupSettingSelect { - _q.ctx.Fields = append(_q.ctx.Fields, fields...) - sbuild := &BackupSettingSelect{BackupSettingQuery: _q} - sbuild.label = backupsetting.Label - sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan - return sbuild -} - -// Aggregate returns a BackupSettingSelect configured with the given aggregations. 
-func (_q *BackupSettingQuery) Aggregate(fns ...AggregateFunc) *BackupSettingSelect { - return _q.Select().Aggregate(fns...) -} - -func (_q *BackupSettingQuery) prepareQuery(ctx context.Context) error { - for _, inter := range _q.inters { - if inter == nil { - return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") - } - if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, _q); err != nil { - return err - } - } - } - for _, f := range _q.ctx.Fields { - if !backupsetting.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} - } - } - if _q.path != nil { - prev, err := _q.path(ctx) - if err != nil { - return err - } - _q.sql = prev - } - return nil -} - -func (_q *BackupSettingQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BackupSetting, error) { - var ( - nodes = []*BackupSetting{} - _spec = _q.querySpec() - ) - _spec.ScanValues = func(columns []string) ([]any, error) { - return (*BackupSetting).scanValues(nil, columns) - } - _spec.Assign = func(columns []string, values []any) error { - node := &BackupSetting{config: _q.config} - nodes = append(nodes, node) - return node.assignValues(columns, values) - } - for i := range hooks { - hooks[i](ctx, _spec) - } - if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { - return nil, err - } - if len(nodes) == 0 { - return nodes, nil - } - return nodes, nil -} - -func (_q *BackupSettingQuery) sqlCount(ctx context.Context) (int, error) { - _spec := _q.querySpec() - _spec.Node.Columns = _q.ctx.Fields - if len(_q.ctx.Fields) > 0 { - _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique - } - return sqlgraph.CountNodes(ctx, _q.driver, _spec) -} - -func (_q *BackupSettingQuery) querySpec() *sqlgraph.QuerySpec { - _spec := sqlgraph.NewQuerySpec(backupsetting.Table, backupsetting.Columns, sqlgraph.NewFieldSpec(backupsetting.FieldID, field.TypeInt)) - _spec.From = _q.sql - if unique := _q.ctx.Unique; unique != nil 
{ - _spec.Unique = *unique - } else if _q.path != nil { - _spec.Unique = true - } - if fields := _q.ctx.Fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, backupsetting.FieldID) - for i := range fields { - if fields[i] != backupsetting.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) - } - } - } - if ps := _q.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if limit := _q.ctx.Limit; limit != nil { - _spec.Limit = *limit - } - if offset := _q.ctx.Offset; offset != nil { - _spec.Offset = *offset - } - if ps := _q.order; len(ps) > 0 { - _spec.Order = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - return _spec -} - -func (_q *BackupSettingQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(_q.driver.Dialect()) - t1 := builder.Table(backupsetting.Table) - columns := _q.ctx.Fields - if len(columns) == 0 { - columns = backupsetting.Columns - } - selector := builder.Select(t1.Columns(columns...)...).From(t1) - if _q.sql != nil { - selector = _q.sql - selector.Select(selector.Columns(columns...)...) - } - if _q.ctx.Unique != nil && *_q.ctx.Unique { - selector.Distinct() - } - for _, p := range _q.predicates { - p(selector) - } - for _, p := range _q.order { - p(selector) - } - if offset := _q.ctx.Offset; offset != nil { - // limit is mandatory for offset clause. We start - // with default value, and override it below if needed. - selector.Offset(*offset).Limit(math.MaxInt32) - } - if limit := _q.ctx.Limit; limit != nil { - selector.Limit(*limit) - } - return selector -} - -// BackupSettingGroupBy is the group-by builder for BackupSetting entities. -type BackupSettingGroupBy struct { - selector - build *BackupSettingQuery -} - -// Aggregate adds the given aggregation functions to the group-by query. 
-func (_g *BackupSettingGroupBy) Aggregate(fns ...AggregateFunc) *BackupSettingGroupBy { - _g.fns = append(_g.fns, fns...) - return _g -} - -// Scan applies the selector query and scans the result into the given value. -func (_g *BackupSettingGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) - if err := _g.build.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*BackupSettingQuery, *BackupSettingGroupBy](ctx, _g.build, _g, _g.build.inters, v) -} - -func (_g *BackupSettingGroupBy) sqlScan(ctx context.Context, root *BackupSettingQuery, v any) error { - selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(_g.fns)) - for _, fn := range _g.fns { - aggregation = append(aggregation, fn(selector)) - } - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) - for _, f := range *_g.flds { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - selector.GroupBy(selector.Columns(*_g.flds...)...) - if err := selector.Err(); err != nil { - return err - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} - -// BackupSettingSelect is the builder for selecting fields of BackupSetting entities. -type BackupSettingSelect struct { - *BackupSettingQuery - selector -} - -// Aggregate adds the given aggregation functions to the selector query. -func (_s *BackupSettingSelect) Aggregate(fns ...AggregateFunc) *BackupSettingSelect { - _s.fns = append(_s.fns, fns...) - return _s -} - -// Scan applies the selector query and scans the result into the given value. 
-func (_s *BackupSettingSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) - if err := _s.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*BackupSettingQuery, *BackupSettingSelect](ctx, _s.BackupSettingQuery, _s, _s.inters, v) -} - -func (_s *BackupSettingSelect) sqlScan(ctx context.Context, root *BackupSettingQuery, v any) error { - selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(_s.fns)) - for _, fn := range _s.fns { - aggregation = append(aggregation, fn(selector)) - } - switch n := len(*_s.selector.flds); { - case n == 0 && len(aggregation) > 0: - selector.Select(aggregation...) - case n != 0 && len(aggregation) > 0: - selector.AppendSelect(aggregation...) - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := _s.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} diff --git a/backup/ent/backupsetting_update.go b/backup/ent/backupsetting_update.go deleted file mode 100644 index 04b5af80f..000000000 --- a/backup/ent/backupsetting_update.go +++ /dev/null @@ -1,448 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupSettingUpdate is the builder for updating BackupSetting entities. -type BackupSettingUpdate struct { - config - hooks []Hook - mutation *BackupSettingMutation -} - -// Where appends a list predicates to the BackupSettingUpdate builder. -func (_u *BackupSettingUpdate) Where(ps ...predicate.BackupSetting) *BackupSettingUpdate { - _u.mutation.Where(ps...) - return _u -} - -// SetSourceMode sets the "source_mode" field. 
-func (_u *BackupSettingUpdate) SetSourceMode(v backupsetting.SourceMode) *BackupSettingUpdate { - _u.mutation.SetSourceMode(v) - return _u -} - -// SetNillableSourceMode sets the "source_mode" field if the given value is not nil. -func (_u *BackupSettingUpdate) SetNillableSourceMode(v *backupsetting.SourceMode) *BackupSettingUpdate { - if v != nil { - _u.SetSourceMode(*v) - } - return _u -} - -// SetBackupRoot sets the "backup_root" field. -func (_u *BackupSettingUpdate) SetBackupRoot(v string) *BackupSettingUpdate { - _u.mutation.SetBackupRoot(v) - return _u -} - -// SetNillableBackupRoot sets the "backup_root" field if the given value is not nil. -func (_u *BackupSettingUpdate) SetNillableBackupRoot(v *string) *BackupSettingUpdate { - if v != nil { - _u.SetBackupRoot(*v) - } - return _u -} - -// SetRetentionDays sets the "retention_days" field. -func (_u *BackupSettingUpdate) SetRetentionDays(v int) *BackupSettingUpdate { - _u.mutation.ResetRetentionDays() - _u.mutation.SetRetentionDays(v) - return _u -} - -// SetNillableRetentionDays sets the "retention_days" field if the given value is not nil. -func (_u *BackupSettingUpdate) SetNillableRetentionDays(v *int) *BackupSettingUpdate { - if v != nil { - _u.SetRetentionDays(*v) - } - return _u -} - -// AddRetentionDays adds value to the "retention_days" field. -func (_u *BackupSettingUpdate) AddRetentionDays(v int) *BackupSettingUpdate { - _u.mutation.AddRetentionDays(v) - return _u -} - -// SetKeepLast sets the "keep_last" field. -func (_u *BackupSettingUpdate) SetKeepLast(v int) *BackupSettingUpdate { - _u.mutation.ResetKeepLast() - _u.mutation.SetKeepLast(v) - return _u -} - -// SetNillableKeepLast sets the "keep_last" field if the given value is not nil. -func (_u *BackupSettingUpdate) SetNillableKeepLast(v *int) *BackupSettingUpdate { - if v != nil { - _u.SetKeepLast(*v) - } - return _u -} - -// AddKeepLast adds value to the "keep_last" field. 
-func (_u *BackupSettingUpdate) AddKeepLast(v int) *BackupSettingUpdate { - _u.mutation.AddKeepLast(v) - return _u -} - -// SetSqlitePath sets the "sqlite_path" field. -func (_u *BackupSettingUpdate) SetSqlitePath(v string) *BackupSettingUpdate { - _u.mutation.SetSqlitePath(v) - return _u -} - -// SetNillableSqlitePath sets the "sqlite_path" field if the given value is not nil. -func (_u *BackupSettingUpdate) SetNillableSqlitePath(v *string) *BackupSettingUpdate { - if v != nil { - _u.SetSqlitePath(*v) - } - return _u -} - -// SetUpdatedAt sets the "updated_at" field. -func (_u *BackupSettingUpdate) SetUpdatedAt(v time.Time) *BackupSettingUpdate { - _u.mutation.SetUpdatedAt(v) - return _u -} - -// Mutation returns the BackupSettingMutation object of the builder. -func (_u *BackupSettingUpdate) Mutation() *BackupSettingMutation { - return _u.mutation -} - -// Save executes the query and returns the number of nodes affected by the update operation. -func (_u *BackupSettingUpdate) Save(ctx context.Context) (int, error) { - _u.defaults() - return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (_u *BackupSettingUpdate) SaveX(ctx context.Context) int { - affected, err := _u.Save(ctx) - if err != nil { - panic(err) - } - return affected -} - -// Exec executes the query. -func (_u *BackupSettingUpdate) Exec(ctx context.Context) error { - _, err := _u.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_u *BackupSettingUpdate) ExecX(ctx context.Context) { - if err := _u.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_u *BackupSettingUpdate) defaults() { - if _, ok := _u.mutation.UpdatedAt(); !ok { - v := backupsetting.UpdateDefaultUpdatedAt() - _u.mutation.SetUpdatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. 
-func (_u *BackupSettingUpdate) check() error { - if v, ok := _u.mutation.SourceMode(); ok { - if err := backupsetting.SourceModeValidator(v); err != nil { - return &ValidationError{Name: "source_mode", err: fmt.Errorf(`ent: validator failed for field "BackupSetting.source_mode": %w`, err)} - } - } - return nil -} - -func (_u *BackupSettingUpdate) sqlSave(ctx context.Context) (_node int, err error) { - if err := _u.check(); err != nil { - return _node, err - } - _spec := sqlgraph.NewUpdateSpec(backupsetting.Table, backupsetting.Columns, sqlgraph.NewFieldSpec(backupsetting.FieldID, field.TypeInt)) - if ps := _u.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := _u.mutation.SourceMode(); ok { - _spec.SetField(backupsetting.FieldSourceMode, field.TypeEnum, value) - } - if value, ok := _u.mutation.BackupRoot(); ok { - _spec.SetField(backupsetting.FieldBackupRoot, field.TypeString, value) - } - if value, ok := _u.mutation.RetentionDays(); ok { - _spec.SetField(backupsetting.FieldRetentionDays, field.TypeInt, value) - } - if value, ok := _u.mutation.AddedRetentionDays(); ok { - _spec.AddField(backupsetting.FieldRetentionDays, field.TypeInt, value) - } - if value, ok := _u.mutation.KeepLast(); ok { - _spec.SetField(backupsetting.FieldKeepLast, field.TypeInt, value) - } - if value, ok := _u.mutation.AddedKeepLast(); ok { - _spec.AddField(backupsetting.FieldKeepLast, field.TypeInt, value) - } - if value, ok := _u.mutation.SqlitePath(); ok { - _spec.SetField(backupsetting.FieldSqlitePath, field.TypeString, value) - } - if value, ok := _u.mutation.UpdatedAt(); ok { - _spec.SetField(backupsetting.FieldUpdatedAt, field.TypeTime, value) - } - if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{backupsetting.Label} - } else if sqlgraph.IsConstraintError(err) { - err = 
&ConstraintError{msg: err.Error(), wrap: err} - } - return 0, err - } - _u.mutation.done = true - return _node, nil -} - -// BackupSettingUpdateOne is the builder for updating a single BackupSetting entity. -type BackupSettingUpdateOne struct { - config - fields []string - hooks []Hook - mutation *BackupSettingMutation -} - -// SetSourceMode sets the "source_mode" field. -func (_u *BackupSettingUpdateOne) SetSourceMode(v backupsetting.SourceMode) *BackupSettingUpdateOne { - _u.mutation.SetSourceMode(v) - return _u -} - -// SetNillableSourceMode sets the "source_mode" field if the given value is not nil. -func (_u *BackupSettingUpdateOne) SetNillableSourceMode(v *backupsetting.SourceMode) *BackupSettingUpdateOne { - if v != nil { - _u.SetSourceMode(*v) - } - return _u -} - -// SetBackupRoot sets the "backup_root" field. -func (_u *BackupSettingUpdateOne) SetBackupRoot(v string) *BackupSettingUpdateOne { - _u.mutation.SetBackupRoot(v) - return _u -} - -// SetNillableBackupRoot sets the "backup_root" field if the given value is not nil. -func (_u *BackupSettingUpdateOne) SetNillableBackupRoot(v *string) *BackupSettingUpdateOne { - if v != nil { - _u.SetBackupRoot(*v) - } - return _u -} - -// SetRetentionDays sets the "retention_days" field. -func (_u *BackupSettingUpdateOne) SetRetentionDays(v int) *BackupSettingUpdateOne { - _u.mutation.ResetRetentionDays() - _u.mutation.SetRetentionDays(v) - return _u -} - -// SetNillableRetentionDays sets the "retention_days" field if the given value is not nil. -func (_u *BackupSettingUpdateOne) SetNillableRetentionDays(v *int) *BackupSettingUpdateOne { - if v != nil { - _u.SetRetentionDays(*v) - } - return _u -} - -// AddRetentionDays adds value to the "retention_days" field. -func (_u *BackupSettingUpdateOne) AddRetentionDays(v int) *BackupSettingUpdateOne { - _u.mutation.AddRetentionDays(v) - return _u -} - -// SetKeepLast sets the "keep_last" field. 
-func (_u *BackupSettingUpdateOne) SetKeepLast(v int) *BackupSettingUpdateOne { - _u.mutation.ResetKeepLast() - _u.mutation.SetKeepLast(v) - return _u -} - -// SetNillableKeepLast sets the "keep_last" field if the given value is not nil. -func (_u *BackupSettingUpdateOne) SetNillableKeepLast(v *int) *BackupSettingUpdateOne { - if v != nil { - _u.SetKeepLast(*v) - } - return _u -} - -// AddKeepLast adds value to the "keep_last" field. -func (_u *BackupSettingUpdateOne) AddKeepLast(v int) *BackupSettingUpdateOne { - _u.mutation.AddKeepLast(v) - return _u -} - -// SetSqlitePath sets the "sqlite_path" field. -func (_u *BackupSettingUpdateOne) SetSqlitePath(v string) *BackupSettingUpdateOne { - _u.mutation.SetSqlitePath(v) - return _u -} - -// SetNillableSqlitePath sets the "sqlite_path" field if the given value is not nil. -func (_u *BackupSettingUpdateOne) SetNillableSqlitePath(v *string) *BackupSettingUpdateOne { - if v != nil { - _u.SetSqlitePath(*v) - } - return _u -} - -// SetUpdatedAt sets the "updated_at" field. -func (_u *BackupSettingUpdateOne) SetUpdatedAt(v time.Time) *BackupSettingUpdateOne { - _u.mutation.SetUpdatedAt(v) - return _u -} - -// Mutation returns the BackupSettingMutation object of the builder. -func (_u *BackupSettingUpdateOne) Mutation() *BackupSettingMutation { - return _u.mutation -} - -// Where appends a list predicates to the BackupSettingUpdate builder. -func (_u *BackupSettingUpdateOne) Where(ps ...predicate.BackupSetting) *BackupSettingUpdateOne { - _u.mutation.Where(ps...) - return _u -} - -// Select allows selecting one or more fields (columns) of the returned entity. -// The default is selecting all fields defined in the entity schema. -func (_u *BackupSettingUpdateOne) Select(field string, fields ...string) *BackupSettingUpdateOne { - _u.fields = append([]string{field}, fields...) - return _u -} - -// Save executes the query and returns the updated BackupSetting entity. 
-func (_u *BackupSettingUpdateOne) Save(ctx context.Context) (*BackupSetting, error) { - _u.defaults() - return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (_u *BackupSettingUpdateOne) SaveX(ctx context.Context) *BackupSetting { - node, err := _u.Save(ctx) - if err != nil { - panic(err) - } - return node -} - -// Exec executes the query on the entity. -func (_u *BackupSettingUpdateOne) Exec(ctx context.Context) error { - _, err := _u.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_u *BackupSettingUpdateOne) ExecX(ctx context.Context) { - if err := _u.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_u *BackupSettingUpdateOne) defaults() { - if _, ok := _u.mutation.UpdatedAt(); !ok { - v := backupsetting.UpdateDefaultUpdatedAt() - _u.mutation.SetUpdatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. 
-func (_u *BackupSettingUpdateOne) check() error { - if v, ok := _u.mutation.SourceMode(); ok { - if err := backupsetting.SourceModeValidator(v); err != nil { - return &ValidationError{Name: "source_mode", err: fmt.Errorf(`ent: validator failed for field "BackupSetting.source_mode": %w`, err)} - } - } - return nil -} - -func (_u *BackupSettingUpdateOne) sqlSave(ctx context.Context) (_node *BackupSetting, err error) { - if err := _u.check(); err != nil { - return _node, err - } - _spec := sqlgraph.NewUpdateSpec(backupsetting.Table, backupsetting.Columns, sqlgraph.NewFieldSpec(backupsetting.FieldID, field.TypeInt)) - id, ok := _u.mutation.ID() - if !ok { - return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BackupSetting.id" for update`)} - } - _spec.Node.ID.Value = id - if fields := _u.fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, backupsetting.FieldID) - for _, f := range fields { - if !backupsetting.ValidColumn(f) { - return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} - } - if f != backupsetting.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, f) - } - } - } - if ps := _u.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := _u.mutation.SourceMode(); ok { - _spec.SetField(backupsetting.FieldSourceMode, field.TypeEnum, value) - } - if value, ok := _u.mutation.BackupRoot(); ok { - _spec.SetField(backupsetting.FieldBackupRoot, field.TypeString, value) - } - if value, ok := _u.mutation.RetentionDays(); ok { - _spec.SetField(backupsetting.FieldRetentionDays, field.TypeInt, value) - } - if value, ok := _u.mutation.AddedRetentionDays(); ok { - _spec.AddField(backupsetting.FieldRetentionDays, field.TypeInt, value) - } - if value, ok := _u.mutation.KeepLast(); ok { - _spec.SetField(backupsetting.FieldKeepLast, 
field.TypeInt, value) - } - if value, ok := _u.mutation.AddedKeepLast(); ok { - _spec.AddField(backupsetting.FieldKeepLast, field.TypeInt, value) - } - if value, ok := _u.mutation.SqlitePath(); ok { - _spec.SetField(backupsetting.FieldSqlitePath, field.TypeString, value) - } - if value, ok := _u.mutation.UpdatedAt(); ok { - _spec.SetField(backupsetting.FieldUpdatedAt, field.TypeTime, value) - } - _node = &BackupSetting{config: _u.config} - _spec.Assign = _node.assignValues - _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{backupsetting.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - _u.mutation.done = true - return _node, nil -} diff --git a/backup/ent/backupsourceconfig.go b/backup/ent/backupsourceconfig.go deleted file mode 100644 index 697a44aa7..000000000 --- a/backup/ent/backupsourceconfig.go +++ /dev/null @@ -1,267 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "fmt" - "strings" - "time" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" -) - -// BackupSourceConfig is the model entity for the BackupSourceConfig schema. -type BackupSourceConfig struct { - config `json:"-"` - // ID of the ent. - ID int `json:"id,omitempty"` - // SourceType holds the value of the "source_type" field. - SourceType backupsourceconfig.SourceType `json:"source_type,omitempty"` - // ProfileID holds the value of the "profile_id" field. - ProfileID string `json:"profile_id,omitempty"` - // Name holds the value of the "name" field. - Name string `json:"name,omitempty"` - // IsActive holds the value of the "is_active" field. - IsActive bool `json:"is_active,omitempty"` - // Host holds the value of the "host" field. 
- Host string `json:"host,omitempty"` - // Port holds the value of the "port" field. - Port *int `json:"port,omitempty"` - // Username holds the value of the "username" field. - Username string `json:"username,omitempty"` - // PasswordEncrypted holds the value of the "password_encrypted" field. - PasswordEncrypted string `json:"-"` - // Database holds the value of the "database" field. - Database string `json:"database,omitempty"` - // SslMode holds the value of the "ssl_mode" field. - SslMode string `json:"ssl_mode,omitempty"` - // Addr holds the value of the "addr" field. - Addr string `json:"addr,omitempty"` - // RedisDb holds the value of the "redis_db" field. - RedisDb *int `json:"redis_db,omitempty"` - // ContainerName holds the value of the "container_name" field. - ContainerName string `json:"container_name,omitempty"` - // CreatedAt holds the value of the "created_at" field. - CreatedAt time.Time `json:"created_at,omitempty"` - // UpdatedAt holds the value of the "updated_at" field. - UpdatedAt time.Time `json:"updated_at,omitempty"` - selectValues sql.SelectValues -} - -// scanValues returns the types for scanning values from sql.Rows. 
-func (*BackupSourceConfig) scanValues(columns []string) ([]any, error) { - values := make([]any, len(columns)) - for i := range columns { - switch columns[i] { - case backupsourceconfig.FieldIsActive: - values[i] = new(sql.NullBool) - case backupsourceconfig.FieldID, backupsourceconfig.FieldPort, backupsourceconfig.FieldRedisDb: - values[i] = new(sql.NullInt64) - case backupsourceconfig.FieldSourceType, backupsourceconfig.FieldProfileID, backupsourceconfig.FieldName, backupsourceconfig.FieldHost, backupsourceconfig.FieldUsername, backupsourceconfig.FieldPasswordEncrypted, backupsourceconfig.FieldDatabase, backupsourceconfig.FieldSslMode, backupsourceconfig.FieldAddr, backupsourceconfig.FieldContainerName: - values[i] = new(sql.NullString) - case backupsourceconfig.FieldCreatedAt, backupsourceconfig.FieldUpdatedAt: - values[i] = new(sql.NullTime) - default: - values[i] = new(sql.UnknownType) - } - } - return values, nil -} - -// assignValues assigns the values that were returned from sql.Rows (after scanning) -// to the BackupSourceConfig fields. 
-func (_m *BackupSourceConfig) assignValues(columns []string, values []any) error { - if m, n := len(values), len(columns); m < n { - return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) - } - for i := range columns { - switch columns[i] { - case backupsourceconfig.FieldID: - value, ok := values[i].(*sql.NullInt64) - if !ok { - return fmt.Errorf("unexpected type %T for field id", value) - } - _m.ID = int(value.Int64) - case backupsourceconfig.FieldSourceType: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field source_type", values[i]) - } else if value.Valid { - _m.SourceType = backupsourceconfig.SourceType(value.String) - } - case backupsourceconfig.FieldProfileID: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field profile_id", values[i]) - } else if value.Valid { - _m.ProfileID = value.String - } - case backupsourceconfig.FieldName: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field name", values[i]) - } else if value.Valid { - _m.Name = value.String - } - case backupsourceconfig.FieldIsActive: - if value, ok := values[i].(*sql.NullBool); !ok { - return fmt.Errorf("unexpected type %T for field is_active", values[i]) - } else if value.Valid { - _m.IsActive = value.Bool - } - case backupsourceconfig.FieldHost: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field host", values[i]) - } else if value.Valid { - _m.Host = value.String - } - case backupsourceconfig.FieldPort: - if value, ok := values[i].(*sql.NullInt64); !ok { - return fmt.Errorf("unexpected type %T for field port", values[i]) - } else if value.Valid { - _m.Port = new(int) - *_m.Port = int(value.Int64) - } - case backupsourceconfig.FieldUsername: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field username", values[i]) - } else if 
value.Valid { - _m.Username = value.String - } - case backupsourceconfig.FieldPasswordEncrypted: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field password_encrypted", values[i]) - } else if value.Valid { - _m.PasswordEncrypted = value.String - } - case backupsourceconfig.FieldDatabase: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field database", values[i]) - } else if value.Valid { - _m.Database = value.String - } - case backupsourceconfig.FieldSslMode: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field ssl_mode", values[i]) - } else if value.Valid { - _m.SslMode = value.String - } - case backupsourceconfig.FieldAddr: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field addr", values[i]) - } else if value.Valid { - _m.Addr = value.String - } - case backupsourceconfig.FieldRedisDb: - if value, ok := values[i].(*sql.NullInt64); !ok { - return fmt.Errorf("unexpected type %T for field redis_db", values[i]) - } else if value.Valid { - _m.RedisDb = new(int) - *_m.RedisDb = int(value.Int64) - } - case backupsourceconfig.FieldContainerName: - if value, ok := values[i].(*sql.NullString); !ok { - return fmt.Errorf("unexpected type %T for field container_name", values[i]) - } else if value.Valid { - _m.ContainerName = value.String - } - case backupsourceconfig.FieldCreatedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field created_at", values[i]) - } else if value.Valid { - _m.CreatedAt = value.Time - } - case backupsourceconfig.FieldUpdatedAt: - if value, ok := values[i].(*sql.NullTime); !ok { - return fmt.Errorf("unexpected type %T for field updated_at", values[i]) - } else if value.Valid { - _m.UpdatedAt = value.Time - } - default: - _m.selectValues.Set(columns[i], values[i]) - } - } - return nil -} - -// Value 
returns the ent.Value that was dynamically selected and assigned to the BackupSourceConfig. -// This includes values selected through modifiers, order, etc. -func (_m *BackupSourceConfig) Value(name string) (ent.Value, error) { - return _m.selectValues.Get(name) -} - -// Update returns a builder for updating this BackupSourceConfig. -// Note that you need to call BackupSourceConfig.Unwrap() before calling this method if this BackupSourceConfig -// was returned from a transaction, and the transaction was committed or rolled back. -func (_m *BackupSourceConfig) Update() *BackupSourceConfigUpdateOne { - return NewBackupSourceConfigClient(_m.config).UpdateOne(_m) -} - -// Unwrap unwraps the BackupSourceConfig entity that was returned from a transaction after it was closed, -// so that all future queries will be executed through the driver which created the transaction. -func (_m *BackupSourceConfig) Unwrap() *BackupSourceConfig { - _tx, ok := _m.config.driver.(*txDriver) - if !ok { - panic("ent: BackupSourceConfig is not a transactional entity") - } - _m.config.driver = _tx.drv - return _m -} - -// String implements the fmt.Stringer. 
-func (_m *BackupSourceConfig) String() string { - var builder strings.Builder - builder.WriteString("BackupSourceConfig(") - builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) - builder.WriteString("source_type=") - builder.WriteString(fmt.Sprintf("%v", _m.SourceType)) - builder.WriteString(", ") - builder.WriteString("profile_id=") - builder.WriteString(_m.ProfileID) - builder.WriteString(", ") - builder.WriteString("name=") - builder.WriteString(_m.Name) - builder.WriteString(", ") - builder.WriteString("is_active=") - builder.WriteString(fmt.Sprintf("%v", _m.IsActive)) - builder.WriteString(", ") - builder.WriteString("host=") - builder.WriteString(_m.Host) - builder.WriteString(", ") - if v := _m.Port; v != nil { - builder.WriteString("port=") - builder.WriteString(fmt.Sprintf("%v", *v)) - } - builder.WriteString(", ") - builder.WriteString("username=") - builder.WriteString(_m.Username) - builder.WriteString(", ") - builder.WriteString("password_encrypted=") - builder.WriteString(", ") - builder.WriteString("database=") - builder.WriteString(_m.Database) - builder.WriteString(", ") - builder.WriteString("ssl_mode=") - builder.WriteString(_m.SslMode) - builder.WriteString(", ") - builder.WriteString("addr=") - builder.WriteString(_m.Addr) - builder.WriteString(", ") - if v := _m.RedisDb; v != nil { - builder.WriteString("redis_db=") - builder.WriteString(fmt.Sprintf("%v", *v)) - } - builder.WriteString(", ") - builder.WriteString("container_name=") - builder.WriteString(_m.ContainerName) - builder.WriteString(", ") - builder.WriteString("created_at=") - builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) - builder.WriteString(", ") - builder.WriteString("updated_at=") - builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) - builder.WriteByte(')') - return builder.String() -} - -// BackupSourceConfigs is a parsable slice of BackupSourceConfig. 
-type BackupSourceConfigs []*BackupSourceConfig diff --git a/backup/ent/backupsourceconfig/backupsourceconfig.go b/backup/ent/backupsourceconfig/backupsourceconfig.go deleted file mode 100644 index 202ba8549..000000000 --- a/backup/ent/backupsourceconfig/backupsourceconfig.go +++ /dev/null @@ -1,198 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package backupsourceconfig - -import ( - "fmt" - "time" - - "entgo.io/ent/dialect/sql" -) - -const ( - // Label holds the string label denoting the backupsourceconfig type in the database. - Label = "backup_source_config" - // FieldID holds the string denoting the id field in the database. - FieldID = "id" - // FieldSourceType holds the string denoting the source_type field in the database. - FieldSourceType = "source_type" - // FieldProfileID holds the string denoting the profile_id field in the database. - FieldProfileID = "profile_id" - // FieldName holds the string denoting the name field in the database. - FieldName = "name" - // FieldIsActive holds the string denoting the is_active field in the database. - FieldIsActive = "is_active" - // FieldHost holds the string denoting the host field in the database. - FieldHost = "host" - // FieldPort holds the string denoting the port field in the database. - FieldPort = "port" - // FieldUsername holds the string denoting the username field in the database. - FieldUsername = "username" - // FieldPasswordEncrypted holds the string denoting the password_encrypted field in the database. - FieldPasswordEncrypted = "password_encrypted" - // FieldDatabase holds the string denoting the database field in the database. - FieldDatabase = "database" - // FieldSslMode holds the string denoting the ssl_mode field in the database. - FieldSslMode = "ssl_mode" - // FieldAddr holds the string denoting the addr field in the database. - FieldAddr = "addr" - // FieldRedisDb holds the string denoting the redis_db field in the database. 
- FieldRedisDb = "redis_db" - // FieldContainerName holds the string denoting the container_name field in the database. - FieldContainerName = "container_name" - // FieldCreatedAt holds the string denoting the created_at field in the database. - FieldCreatedAt = "created_at" - // FieldUpdatedAt holds the string denoting the updated_at field in the database. - FieldUpdatedAt = "updated_at" - // Table holds the table name of the backupsourceconfig in the database. - Table = "backup_source_configs" -) - -// Columns holds all SQL columns for backupsourceconfig fields. -var Columns = []string{ - FieldID, - FieldSourceType, - FieldProfileID, - FieldName, - FieldIsActive, - FieldHost, - FieldPort, - FieldUsername, - FieldPasswordEncrypted, - FieldDatabase, - FieldSslMode, - FieldAddr, - FieldRedisDb, - FieldContainerName, - FieldCreatedAt, - FieldUpdatedAt, -} - -// ValidColumn reports if the column name is valid (part of the table columns). -func ValidColumn(column string) bool { - for i := range Columns { - if column == Columns[i] { - return true - } - } - return false -} - -var ( - // DefaultIsActive holds the default value on creation for the "is_active" field. - DefaultIsActive bool - // DefaultContainerName holds the default value on creation for the "container_name" field. - DefaultContainerName string - // DefaultCreatedAt holds the default value on creation for the "created_at" field. - DefaultCreatedAt func() time.Time - // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. - DefaultUpdatedAt func() time.Time - // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. - UpdateDefaultUpdatedAt func() time.Time -) - -// SourceType defines the type for the "source_type" enum field. -type SourceType string - -// SourceType values. 
-const ( - SourceTypePostgres SourceType = "postgres" - SourceTypeRedis SourceType = "redis" -) - -func (st SourceType) String() string { - return string(st) -} - -// SourceTypeValidator is a validator for the "source_type" field enum values. It is called by the builders before save. -func SourceTypeValidator(st SourceType) error { - switch st { - case SourceTypePostgres, SourceTypeRedis: - return nil - default: - return fmt.Errorf("backupsourceconfig: invalid enum value for source_type field: %q", st) - } -} - -// OrderOption defines the ordering options for the BackupSourceConfig queries. -type OrderOption func(*sql.Selector) - -// ByID orders the results by the id field. -func ByID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldID, opts...).ToFunc() -} - -// BySourceType orders the results by the source_type field. -func BySourceType(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldSourceType, opts...).ToFunc() -} - -// ByProfileID orders the results by the profile_id field. -func ByProfileID(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldProfileID, opts...).ToFunc() -} - -// ByName orders the results by the name field. -func ByName(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldName, opts...).ToFunc() -} - -// ByIsActive orders the results by the is_active field. -func ByIsActive(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldIsActive, opts...).ToFunc() -} - -// ByHost orders the results by the host field. -func ByHost(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldHost, opts...).ToFunc() -} - -// ByPort orders the results by the port field. -func ByPort(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldPort, opts...).ToFunc() -} - -// ByUsername orders the results by the username field. 
-func ByUsername(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldUsername, opts...).ToFunc() -} - -// ByPasswordEncrypted orders the results by the password_encrypted field. -func ByPasswordEncrypted(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldPasswordEncrypted, opts...).ToFunc() -} - -// ByDatabase orders the results by the database field. -func ByDatabase(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldDatabase, opts...).ToFunc() -} - -// BySslMode orders the results by the ssl_mode field. -func BySslMode(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldSslMode, opts...).ToFunc() -} - -// ByAddr orders the results by the addr field. -func ByAddr(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldAddr, opts...).ToFunc() -} - -// ByRedisDb orders the results by the redis_db field. -func ByRedisDb(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldRedisDb, opts...).ToFunc() -} - -// ByContainerName orders the results by the container_name field. -func ByContainerName(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldContainerName, opts...).ToFunc() -} - -// ByCreatedAt orders the results by the created_at field. -func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() -} - -// ByUpdatedAt orders the results by the updated_at field. -func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { - return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() -} diff --git a/backup/ent/backupsourceconfig/where.go b/backup/ent/backupsourceconfig/where.go deleted file mode 100644 index 41eabc740..000000000 --- a/backup/ent/backupsourceconfig/where.go +++ /dev/null @@ -1,995 +0,0 @@ -// Code generated by ent, DO NOT EDIT. 
- -package backupsourceconfig - -import ( - "time" - - "entgo.io/ent/dialect/sql" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// ID filters vertices based on their ID field. -func ID(id int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldID, id)) -} - -// IDEQ applies the EQ predicate on the ID field. -func IDEQ(id int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldID, id)) -} - -// IDNEQ applies the NEQ predicate on the ID field. -func IDNEQ(id int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldID, id)) -} - -// IDIn applies the In predicate on the ID field. -func IDIn(ids ...int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldID, ids...)) -} - -// IDNotIn applies the NotIn predicate on the ID field. -func IDNotIn(ids ...int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldID, ids...)) -} - -// IDGT applies the GT predicate on the ID field. -func IDGT(id int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldID, id)) -} - -// IDGTE applies the GTE predicate on the ID field. -func IDGTE(id int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldID, id)) -} - -// IDLT applies the LT predicate on the ID field. -func IDLT(id int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldID, id)) -} - -// IDLTE applies the LTE predicate on the ID field. -func IDLTE(id int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldID, id)) -} - -// ProfileID applies equality check predicate on the "profile_id" field. It's identical to ProfileIDEQ. 
-func ProfileID(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldProfileID, v)) -} - -// Name applies equality check predicate on the "name" field. It's identical to NameEQ. -func Name(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldName, v)) -} - -// IsActive applies equality check predicate on the "is_active" field. It's identical to IsActiveEQ. -func IsActive(v bool) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldIsActive, v)) -} - -// Host applies equality check predicate on the "host" field. It's identical to HostEQ. -func Host(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldHost, v)) -} - -// Port applies equality check predicate on the "port" field. It's identical to PortEQ. -func Port(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldPort, v)) -} - -// Username applies equality check predicate on the "username" field. It's identical to UsernameEQ. -func Username(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldUsername, v)) -} - -// PasswordEncrypted applies equality check predicate on the "password_encrypted" field. It's identical to PasswordEncryptedEQ. -func PasswordEncrypted(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldPasswordEncrypted, v)) -} - -// Database applies equality check predicate on the "database" field. It's identical to DatabaseEQ. -func Database(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldDatabase, v)) -} - -// SslMode applies equality check predicate on the "ssl_mode" field. It's identical to SslModeEQ. 
-func SslMode(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldSslMode, v)) -} - -// Addr applies equality check predicate on the "addr" field. It's identical to AddrEQ. -func Addr(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldAddr, v)) -} - -// RedisDb applies equality check predicate on the "redis_db" field. It's identical to RedisDbEQ. -func RedisDb(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldRedisDb, v)) -} - -// ContainerName applies equality check predicate on the "container_name" field. It's identical to ContainerNameEQ. -func ContainerName(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldContainerName, v)) -} - -// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. -func CreatedAt(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldCreatedAt, v)) -} - -// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. -func UpdatedAt(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldUpdatedAt, v)) -} - -// SourceTypeEQ applies the EQ predicate on the "source_type" field. -func SourceTypeEQ(v SourceType) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldSourceType, v)) -} - -// SourceTypeNEQ applies the NEQ predicate on the "source_type" field. -func SourceTypeNEQ(v SourceType) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldSourceType, v)) -} - -// SourceTypeIn applies the In predicate on the "source_type" field. 
-func SourceTypeIn(vs ...SourceType) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldSourceType, vs...)) -} - -// SourceTypeNotIn applies the NotIn predicate on the "source_type" field. -func SourceTypeNotIn(vs ...SourceType) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldSourceType, vs...)) -} - -// ProfileIDEQ applies the EQ predicate on the "profile_id" field. -func ProfileIDEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldProfileID, v)) -} - -// ProfileIDNEQ applies the NEQ predicate on the "profile_id" field. -func ProfileIDNEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldProfileID, v)) -} - -// ProfileIDIn applies the In predicate on the "profile_id" field. -func ProfileIDIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldProfileID, vs...)) -} - -// ProfileIDNotIn applies the NotIn predicate on the "profile_id" field. -func ProfileIDNotIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldProfileID, vs...)) -} - -// ProfileIDGT applies the GT predicate on the "profile_id" field. -func ProfileIDGT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldProfileID, v)) -} - -// ProfileIDGTE applies the GTE predicate on the "profile_id" field. -func ProfileIDGTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldProfileID, v)) -} - -// ProfileIDLT applies the LT predicate on the "profile_id" field. -func ProfileIDLT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldProfileID, v)) -} - -// ProfileIDLTE applies the LTE predicate on the "profile_id" field. 
-func ProfileIDLTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldProfileID, v)) -} - -// ProfileIDContains applies the Contains predicate on the "profile_id" field. -func ProfileIDContains(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContains(FieldProfileID, v)) -} - -// ProfileIDHasPrefix applies the HasPrefix predicate on the "profile_id" field. -func ProfileIDHasPrefix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldProfileID, v)) -} - -// ProfileIDHasSuffix applies the HasSuffix predicate on the "profile_id" field. -func ProfileIDHasSuffix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldProfileID, v)) -} - -// ProfileIDEqualFold applies the EqualFold predicate on the "profile_id" field. -func ProfileIDEqualFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldProfileID, v)) -} - -// ProfileIDContainsFold applies the ContainsFold predicate on the "profile_id" field. -func ProfileIDContainsFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldProfileID, v)) -} - -// NameEQ applies the EQ predicate on the "name" field. -func NameEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldName, v)) -} - -// NameNEQ applies the NEQ predicate on the "name" field. -func NameNEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldName, v)) -} - -// NameIn applies the In predicate on the "name" field. -func NameIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldName, vs...)) -} - -// NameNotIn applies the NotIn predicate on the "name" field. 
-func NameNotIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldName, vs...)) -} - -// NameGT applies the GT predicate on the "name" field. -func NameGT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldName, v)) -} - -// NameGTE applies the GTE predicate on the "name" field. -func NameGTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldName, v)) -} - -// NameLT applies the LT predicate on the "name" field. -func NameLT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldName, v)) -} - -// NameLTE applies the LTE predicate on the "name" field. -func NameLTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldName, v)) -} - -// NameContains applies the Contains predicate on the "name" field. -func NameContains(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContains(FieldName, v)) -} - -// NameHasPrefix applies the HasPrefix predicate on the "name" field. -func NameHasPrefix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldName, v)) -} - -// NameHasSuffix applies the HasSuffix predicate on the "name" field. -func NameHasSuffix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldName, v)) -} - -// NameEqualFold applies the EqualFold predicate on the "name" field. -func NameEqualFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldName, v)) -} - -// NameContainsFold applies the ContainsFold predicate on the "name" field. -func NameContainsFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldName, v)) -} - -// IsActiveEQ applies the EQ predicate on the "is_active" field. 
-func IsActiveEQ(v bool) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldIsActive, v)) -} - -// IsActiveNEQ applies the NEQ predicate on the "is_active" field. -func IsActiveNEQ(v bool) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldIsActive, v)) -} - -// HostEQ applies the EQ predicate on the "host" field. -func HostEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldHost, v)) -} - -// HostNEQ applies the NEQ predicate on the "host" field. -func HostNEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldHost, v)) -} - -// HostIn applies the In predicate on the "host" field. -func HostIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldHost, vs...)) -} - -// HostNotIn applies the NotIn predicate on the "host" field. -func HostNotIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldHost, vs...)) -} - -// HostGT applies the GT predicate on the "host" field. -func HostGT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldHost, v)) -} - -// HostGTE applies the GTE predicate on the "host" field. -func HostGTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldHost, v)) -} - -// HostLT applies the LT predicate on the "host" field. -func HostLT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldHost, v)) -} - -// HostLTE applies the LTE predicate on the "host" field. -func HostLTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldHost, v)) -} - -// HostContains applies the Contains predicate on the "host" field. 
-func HostContains(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContains(FieldHost, v)) -} - -// HostHasPrefix applies the HasPrefix predicate on the "host" field. -func HostHasPrefix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldHost, v)) -} - -// HostHasSuffix applies the HasSuffix predicate on the "host" field. -func HostHasSuffix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldHost, v)) -} - -// HostIsNil applies the IsNil predicate on the "host" field. -func HostIsNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIsNull(FieldHost)) -} - -// HostNotNil applies the NotNil predicate on the "host" field. -func HostNotNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotNull(FieldHost)) -} - -// HostEqualFold applies the EqualFold predicate on the "host" field. -func HostEqualFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldHost, v)) -} - -// HostContainsFold applies the ContainsFold predicate on the "host" field. -func HostContainsFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldHost, v)) -} - -// PortEQ applies the EQ predicate on the "port" field. -func PortEQ(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldPort, v)) -} - -// PortNEQ applies the NEQ predicate on the "port" field. -func PortNEQ(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldPort, v)) -} - -// PortIn applies the In predicate on the "port" field. -func PortIn(vs ...int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldPort, vs...)) -} - -// PortNotIn applies the NotIn predicate on the "port" field. 
-func PortNotIn(vs ...int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldPort, vs...)) -} - -// PortGT applies the GT predicate on the "port" field. -func PortGT(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldPort, v)) -} - -// PortGTE applies the GTE predicate on the "port" field. -func PortGTE(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldPort, v)) -} - -// PortLT applies the LT predicate on the "port" field. -func PortLT(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldPort, v)) -} - -// PortLTE applies the LTE predicate on the "port" field. -func PortLTE(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldPort, v)) -} - -// PortIsNil applies the IsNil predicate on the "port" field. -func PortIsNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIsNull(FieldPort)) -} - -// PortNotNil applies the NotNil predicate on the "port" field. -func PortNotNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotNull(FieldPort)) -} - -// UsernameEQ applies the EQ predicate on the "username" field. -func UsernameEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldUsername, v)) -} - -// UsernameNEQ applies the NEQ predicate on the "username" field. -func UsernameNEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldUsername, v)) -} - -// UsernameIn applies the In predicate on the "username" field. -func UsernameIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldUsername, vs...)) -} - -// UsernameNotIn applies the NotIn predicate on the "username" field. 
-func UsernameNotIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldUsername, vs...)) -} - -// UsernameGT applies the GT predicate on the "username" field. -func UsernameGT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldUsername, v)) -} - -// UsernameGTE applies the GTE predicate on the "username" field. -func UsernameGTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldUsername, v)) -} - -// UsernameLT applies the LT predicate on the "username" field. -func UsernameLT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldUsername, v)) -} - -// UsernameLTE applies the LTE predicate on the "username" field. -func UsernameLTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldUsername, v)) -} - -// UsernameContains applies the Contains predicate on the "username" field. -func UsernameContains(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContains(FieldUsername, v)) -} - -// UsernameHasPrefix applies the HasPrefix predicate on the "username" field. -func UsernameHasPrefix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldUsername, v)) -} - -// UsernameHasSuffix applies the HasSuffix predicate on the "username" field. -func UsernameHasSuffix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldUsername, v)) -} - -// UsernameIsNil applies the IsNil predicate on the "username" field. -func UsernameIsNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIsNull(FieldUsername)) -} - -// UsernameNotNil applies the NotNil predicate on the "username" field. 
-func UsernameNotNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotNull(FieldUsername)) -} - -// UsernameEqualFold applies the EqualFold predicate on the "username" field. -func UsernameEqualFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldUsername, v)) -} - -// UsernameContainsFold applies the ContainsFold predicate on the "username" field. -func UsernameContainsFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldUsername, v)) -} - -// PasswordEncryptedEQ applies the EQ predicate on the "password_encrypted" field. -func PasswordEncryptedEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldPasswordEncrypted, v)) -} - -// PasswordEncryptedNEQ applies the NEQ predicate on the "password_encrypted" field. -func PasswordEncryptedNEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldPasswordEncrypted, v)) -} - -// PasswordEncryptedIn applies the In predicate on the "password_encrypted" field. -func PasswordEncryptedIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldPasswordEncrypted, vs...)) -} - -// PasswordEncryptedNotIn applies the NotIn predicate on the "password_encrypted" field. -func PasswordEncryptedNotIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldPasswordEncrypted, vs...)) -} - -// PasswordEncryptedGT applies the GT predicate on the "password_encrypted" field. -func PasswordEncryptedGT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldPasswordEncrypted, v)) -} - -// PasswordEncryptedGTE applies the GTE predicate on the "password_encrypted" field. 
-func PasswordEncryptedGTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldPasswordEncrypted, v)) -} - -// PasswordEncryptedLT applies the LT predicate on the "password_encrypted" field. -func PasswordEncryptedLT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldPasswordEncrypted, v)) -} - -// PasswordEncryptedLTE applies the LTE predicate on the "password_encrypted" field. -func PasswordEncryptedLTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldPasswordEncrypted, v)) -} - -// PasswordEncryptedContains applies the Contains predicate on the "password_encrypted" field. -func PasswordEncryptedContains(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContains(FieldPasswordEncrypted, v)) -} - -// PasswordEncryptedHasPrefix applies the HasPrefix predicate on the "password_encrypted" field. -func PasswordEncryptedHasPrefix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldPasswordEncrypted, v)) -} - -// PasswordEncryptedHasSuffix applies the HasSuffix predicate on the "password_encrypted" field. -func PasswordEncryptedHasSuffix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldPasswordEncrypted, v)) -} - -// PasswordEncryptedIsNil applies the IsNil predicate on the "password_encrypted" field. -func PasswordEncryptedIsNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIsNull(FieldPasswordEncrypted)) -} - -// PasswordEncryptedNotNil applies the NotNil predicate on the "password_encrypted" field. -func PasswordEncryptedNotNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotNull(FieldPasswordEncrypted)) -} - -// PasswordEncryptedEqualFold applies the EqualFold predicate on the "password_encrypted" field. 
-func PasswordEncryptedEqualFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldPasswordEncrypted, v)) -} - -// PasswordEncryptedContainsFold applies the ContainsFold predicate on the "password_encrypted" field. -func PasswordEncryptedContainsFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldPasswordEncrypted, v)) -} - -// DatabaseEQ applies the EQ predicate on the "database" field. -func DatabaseEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldDatabase, v)) -} - -// DatabaseNEQ applies the NEQ predicate on the "database" field. -func DatabaseNEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldDatabase, v)) -} - -// DatabaseIn applies the In predicate on the "database" field. -func DatabaseIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldDatabase, vs...)) -} - -// DatabaseNotIn applies the NotIn predicate on the "database" field. -func DatabaseNotIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldDatabase, vs...)) -} - -// DatabaseGT applies the GT predicate on the "database" field. -func DatabaseGT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldDatabase, v)) -} - -// DatabaseGTE applies the GTE predicate on the "database" field. -func DatabaseGTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldDatabase, v)) -} - -// DatabaseLT applies the LT predicate on the "database" field. -func DatabaseLT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldDatabase, v)) -} - -// DatabaseLTE applies the LTE predicate on the "database" field. 
-func DatabaseLTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldDatabase, v)) -} - -// DatabaseContains applies the Contains predicate on the "database" field. -func DatabaseContains(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContains(FieldDatabase, v)) -} - -// DatabaseHasPrefix applies the HasPrefix predicate on the "database" field. -func DatabaseHasPrefix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldDatabase, v)) -} - -// DatabaseHasSuffix applies the HasSuffix predicate on the "database" field. -func DatabaseHasSuffix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldDatabase, v)) -} - -// DatabaseIsNil applies the IsNil predicate on the "database" field. -func DatabaseIsNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIsNull(FieldDatabase)) -} - -// DatabaseNotNil applies the NotNil predicate on the "database" field. -func DatabaseNotNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotNull(FieldDatabase)) -} - -// DatabaseEqualFold applies the EqualFold predicate on the "database" field. -func DatabaseEqualFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldDatabase, v)) -} - -// DatabaseContainsFold applies the ContainsFold predicate on the "database" field. -func DatabaseContainsFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldDatabase, v)) -} - -// SslModeEQ applies the EQ predicate on the "ssl_mode" field. -func SslModeEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldSslMode, v)) -} - -// SslModeNEQ applies the NEQ predicate on the "ssl_mode" field. 
-func SslModeNEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldSslMode, v)) -} - -// SslModeIn applies the In predicate on the "ssl_mode" field. -func SslModeIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldSslMode, vs...)) -} - -// SslModeNotIn applies the NotIn predicate on the "ssl_mode" field. -func SslModeNotIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldSslMode, vs...)) -} - -// SslModeGT applies the GT predicate on the "ssl_mode" field. -func SslModeGT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldSslMode, v)) -} - -// SslModeGTE applies the GTE predicate on the "ssl_mode" field. -func SslModeGTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldSslMode, v)) -} - -// SslModeLT applies the LT predicate on the "ssl_mode" field. -func SslModeLT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldSslMode, v)) -} - -// SslModeLTE applies the LTE predicate on the "ssl_mode" field. -func SslModeLTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldSslMode, v)) -} - -// SslModeContains applies the Contains predicate on the "ssl_mode" field. -func SslModeContains(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContains(FieldSslMode, v)) -} - -// SslModeHasPrefix applies the HasPrefix predicate on the "ssl_mode" field. -func SslModeHasPrefix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldSslMode, v)) -} - -// SslModeHasSuffix applies the HasSuffix predicate on the "ssl_mode" field. 
-func SslModeHasSuffix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldSslMode, v)) -} - -// SslModeIsNil applies the IsNil predicate on the "ssl_mode" field. -func SslModeIsNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIsNull(FieldSslMode)) -} - -// SslModeNotNil applies the NotNil predicate on the "ssl_mode" field. -func SslModeNotNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotNull(FieldSslMode)) -} - -// SslModeEqualFold applies the EqualFold predicate on the "ssl_mode" field. -func SslModeEqualFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldSslMode, v)) -} - -// SslModeContainsFold applies the ContainsFold predicate on the "ssl_mode" field. -func SslModeContainsFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldSslMode, v)) -} - -// AddrEQ applies the EQ predicate on the "addr" field. -func AddrEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldAddr, v)) -} - -// AddrNEQ applies the NEQ predicate on the "addr" field. -func AddrNEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldAddr, v)) -} - -// AddrIn applies the In predicate on the "addr" field. -func AddrIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldAddr, vs...)) -} - -// AddrNotIn applies the NotIn predicate on the "addr" field. -func AddrNotIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldAddr, vs...)) -} - -// AddrGT applies the GT predicate on the "addr" field. -func AddrGT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldAddr, v)) -} - -// AddrGTE applies the GTE predicate on the "addr" field. 
-func AddrGTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldAddr, v)) -} - -// AddrLT applies the LT predicate on the "addr" field. -func AddrLT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldAddr, v)) -} - -// AddrLTE applies the LTE predicate on the "addr" field. -func AddrLTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldAddr, v)) -} - -// AddrContains applies the Contains predicate on the "addr" field. -func AddrContains(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContains(FieldAddr, v)) -} - -// AddrHasPrefix applies the HasPrefix predicate on the "addr" field. -func AddrHasPrefix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldAddr, v)) -} - -// AddrHasSuffix applies the HasSuffix predicate on the "addr" field. -func AddrHasSuffix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldAddr, v)) -} - -// AddrIsNil applies the IsNil predicate on the "addr" field. -func AddrIsNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIsNull(FieldAddr)) -} - -// AddrNotNil applies the NotNil predicate on the "addr" field. -func AddrNotNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotNull(FieldAddr)) -} - -// AddrEqualFold applies the EqualFold predicate on the "addr" field. -func AddrEqualFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldAddr, v)) -} - -// AddrContainsFold applies the ContainsFold predicate on the "addr" field. -func AddrContainsFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldAddr, v)) -} - -// RedisDbEQ applies the EQ predicate on the "redis_db" field. 
-func RedisDbEQ(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldRedisDb, v)) -} - -// RedisDbNEQ applies the NEQ predicate on the "redis_db" field. -func RedisDbNEQ(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldRedisDb, v)) -} - -// RedisDbIn applies the In predicate on the "redis_db" field. -func RedisDbIn(vs ...int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldRedisDb, vs...)) -} - -// RedisDbNotIn applies the NotIn predicate on the "redis_db" field. -func RedisDbNotIn(vs ...int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldRedisDb, vs...)) -} - -// RedisDbGT applies the GT predicate on the "redis_db" field. -func RedisDbGT(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldRedisDb, v)) -} - -// RedisDbGTE applies the GTE predicate on the "redis_db" field. -func RedisDbGTE(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldRedisDb, v)) -} - -// RedisDbLT applies the LT predicate on the "redis_db" field. -func RedisDbLT(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldRedisDb, v)) -} - -// RedisDbLTE applies the LTE predicate on the "redis_db" field. -func RedisDbLTE(v int) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldRedisDb, v)) -} - -// RedisDbIsNil applies the IsNil predicate on the "redis_db" field. -func RedisDbIsNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIsNull(FieldRedisDb)) -} - -// RedisDbNotNil applies the NotNil predicate on the "redis_db" field. -func RedisDbNotNil() predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotNull(FieldRedisDb)) -} - -// ContainerNameEQ applies the EQ predicate on the "container_name" field. 
-func ContainerNameEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldContainerName, v)) -} - -// ContainerNameNEQ applies the NEQ predicate on the "container_name" field. -func ContainerNameNEQ(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldContainerName, v)) -} - -// ContainerNameIn applies the In predicate on the "container_name" field. -func ContainerNameIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldContainerName, vs...)) -} - -// ContainerNameNotIn applies the NotIn predicate on the "container_name" field. -func ContainerNameNotIn(vs ...string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldContainerName, vs...)) -} - -// ContainerNameGT applies the GT predicate on the "container_name" field. -func ContainerNameGT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldContainerName, v)) -} - -// ContainerNameGTE applies the GTE predicate on the "container_name" field. -func ContainerNameGTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldContainerName, v)) -} - -// ContainerNameLT applies the LT predicate on the "container_name" field. -func ContainerNameLT(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldContainerName, v)) -} - -// ContainerNameLTE applies the LTE predicate on the "container_name" field. -func ContainerNameLTE(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldContainerName, v)) -} - -// ContainerNameContains applies the Contains predicate on the "container_name" field. 
-func ContainerNameContains(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContains(FieldContainerName, v)) -} - -// ContainerNameHasPrefix applies the HasPrefix predicate on the "container_name" field. -func ContainerNameHasPrefix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasPrefix(FieldContainerName, v)) -} - -// ContainerNameHasSuffix applies the HasSuffix predicate on the "container_name" field. -func ContainerNameHasSuffix(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldHasSuffix(FieldContainerName, v)) -} - -// ContainerNameEqualFold applies the EqualFold predicate on the "container_name" field. -func ContainerNameEqualFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEqualFold(FieldContainerName, v)) -} - -// ContainerNameContainsFold applies the ContainsFold predicate on the "container_name" field. -func ContainerNameContainsFold(v string) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldContainsFold(FieldContainerName, v)) -} - -// CreatedAtEQ applies the EQ predicate on the "created_at" field. -func CreatedAtEQ(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldCreatedAt, v)) -} - -// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. -func CreatedAtNEQ(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldCreatedAt, v)) -} - -// CreatedAtIn applies the In predicate on the "created_at" field. -func CreatedAtIn(vs ...time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldCreatedAt, vs...)) -} - -// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. 
-func CreatedAtNotIn(vs ...time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldCreatedAt, vs...)) -} - -// CreatedAtGT applies the GT predicate on the "created_at" field. -func CreatedAtGT(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldCreatedAt, v)) -} - -// CreatedAtGTE applies the GTE predicate on the "created_at" field. -func CreatedAtGTE(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldCreatedAt, v)) -} - -// CreatedAtLT applies the LT predicate on the "created_at" field. -func CreatedAtLT(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldCreatedAt, v)) -} - -// CreatedAtLTE applies the LTE predicate on the "created_at" field. -func CreatedAtLTE(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldCreatedAt, v)) -} - -// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. -func UpdatedAtEQ(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldEQ(FieldUpdatedAt, v)) -} - -// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. -func UpdatedAtNEQ(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNEQ(FieldUpdatedAt, v)) -} - -// UpdatedAtIn applies the In predicate on the "updated_at" field. -func UpdatedAtIn(vs ...time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldIn(FieldUpdatedAt, vs...)) -} - -// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. -func UpdatedAtNotIn(vs ...time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldNotIn(FieldUpdatedAt, vs...)) -} - -// UpdatedAtGT applies the GT predicate on the "updated_at" field. 
-func UpdatedAtGT(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGT(FieldUpdatedAt, v)) -} - -// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. -func UpdatedAtGTE(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldGTE(FieldUpdatedAt, v)) -} - -// UpdatedAtLT applies the LT predicate on the "updated_at" field. -func UpdatedAtLT(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLT(FieldUpdatedAt, v)) -} - -// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. -func UpdatedAtLTE(v time.Time) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.FieldLTE(FieldUpdatedAt, v)) -} - -// And groups predicates with the AND operator between them. -func And(predicates ...predicate.BackupSourceConfig) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.AndPredicates(predicates...)) -} - -// Or groups predicates with the OR operator between them. -func Or(predicates ...predicate.BackupSourceConfig) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.OrPredicates(predicates...)) -} - -// Not applies the not operator on the given predicate. -func Not(p predicate.BackupSourceConfig) predicate.BackupSourceConfig { - return predicate.BackupSourceConfig(sql.NotPredicates(p)) -} diff --git a/backup/ent/backupsourceconfig_create.go b/backup/ent/backupsourceconfig_create.go deleted file mode 100644 index ab2fae9fc..000000000 --- a/backup/ent/backupsourceconfig_create.go +++ /dev/null @@ -1,465 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" -) - -// BackupSourceConfigCreate is the builder for creating a BackupSourceConfig entity. 
-type BackupSourceConfigCreate struct { - config - mutation *BackupSourceConfigMutation - hooks []Hook -} - -// SetSourceType sets the "source_type" field. -func (_c *BackupSourceConfigCreate) SetSourceType(v backupsourceconfig.SourceType) *BackupSourceConfigCreate { - _c.mutation.SetSourceType(v) - return _c -} - -// SetProfileID sets the "profile_id" field. -func (_c *BackupSourceConfigCreate) SetProfileID(v string) *BackupSourceConfigCreate { - _c.mutation.SetProfileID(v) - return _c -} - -// SetName sets the "name" field. -func (_c *BackupSourceConfigCreate) SetName(v string) *BackupSourceConfigCreate { - _c.mutation.SetName(v) - return _c -} - -// SetIsActive sets the "is_active" field. -func (_c *BackupSourceConfigCreate) SetIsActive(v bool) *BackupSourceConfigCreate { - _c.mutation.SetIsActive(v) - return _c -} - -// SetNillableIsActive sets the "is_active" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillableIsActive(v *bool) *BackupSourceConfigCreate { - if v != nil { - _c.SetIsActive(*v) - } - return _c -} - -// SetHost sets the "host" field. -func (_c *BackupSourceConfigCreate) SetHost(v string) *BackupSourceConfigCreate { - _c.mutation.SetHost(v) - return _c -} - -// SetNillableHost sets the "host" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillableHost(v *string) *BackupSourceConfigCreate { - if v != nil { - _c.SetHost(*v) - } - return _c -} - -// SetPort sets the "port" field. -func (_c *BackupSourceConfigCreate) SetPort(v int) *BackupSourceConfigCreate { - _c.mutation.SetPort(v) - return _c -} - -// SetNillablePort sets the "port" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillablePort(v *int) *BackupSourceConfigCreate { - if v != nil { - _c.SetPort(*v) - } - return _c -} - -// SetUsername sets the "username" field. 
-func (_c *BackupSourceConfigCreate) SetUsername(v string) *BackupSourceConfigCreate { - _c.mutation.SetUsername(v) - return _c -} - -// SetNillableUsername sets the "username" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillableUsername(v *string) *BackupSourceConfigCreate { - if v != nil { - _c.SetUsername(*v) - } - return _c -} - -// SetPasswordEncrypted sets the "password_encrypted" field. -func (_c *BackupSourceConfigCreate) SetPasswordEncrypted(v string) *BackupSourceConfigCreate { - _c.mutation.SetPasswordEncrypted(v) - return _c -} - -// SetNillablePasswordEncrypted sets the "password_encrypted" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillablePasswordEncrypted(v *string) *BackupSourceConfigCreate { - if v != nil { - _c.SetPasswordEncrypted(*v) - } - return _c -} - -// SetDatabase sets the "database" field. -func (_c *BackupSourceConfigCreate) SetDatabase(v string) *BackupSourceConfigCreate { - _c.mutation.SetDatabase(v) - return _c -} - -// SetNillableDatabase sets the "database" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillableDatabase(v *string) *BackupSourceConfigCreate { - if v != nil { - _c.SetDatabase(*v) - } - return _c -} - -// SetSslMode sets the "ssl_mode" field. -func (_c *BackupSourceConfigCreate) SetSslMode(v string) *BackupSourceConfigCreate { - _c.mutation.SetSslMode(v) - return _c -} - -// SetNillableSslMode sets the "ssl_mode" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillableSslMode(v *string) *BackupSourceConfigCreate { - if v != nil { - _c.SetSslMode(*v) - } - return _c -} - -// SetAddr sets the "addr" field. -func (_c *BackupSourceConfigCreate) SetAddr(v string) *BackupSourceConfigCreate { - _c.mutation.SetAddr(v) - return _c -} - -// SetNillableAddr sets the "addr" field if the given value is not nil. 
-func (_c *BackupSourceConfigCreate) SetNillableAddr(v *string) *BackupSourceConfigCreate { - if v != nil { - _c.SetAddr(*v) - } - return _c -} - -// SetRedisDb sets the "redis_db" field. -func (_c *BackupSourceConfigCreate) SetRedisDb(v int) *BackupSourceConfigCreate { - _c.mutation.SetRedisDb(v) - return _c -} - -// SetNillableRedisDb sets the "redis_db" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillableRedisDb(v *int) *BackupSourceConfigCreate { - if v != nil { - _c.SetRedisDb(*v) - } - return _c -} - -// SetContainerName sets the "container_name" field. -func (_c *BackupSourceConfigCreate) SetContainerName(v string) *BackupSourceConfigCreate { - _c.mutation.SetContainerName(v) - return _c -} - -// SetNillableContainerName sets the "container_name" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillableContainerName(v *string) *BackupSourceConfigCreate { - if v != nil { - _c.SetContainerName(*v) - } - return _c -} - -// SetCreatedAt sets the "created_at" field. -func (_c *BackupSourceConfigCreate) SetCreatedAt(v time.Time) *BackupSourceConfigCreate { - _c.mutation.SetCreatedAt(v) - return _c -} - -// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillableCreatedAt(v *time.Time) *BackupSourceConfigCreate { - if v != nil { - _c.SetCreatedAt(*v) - } - return _c -} - -// SetUpdatedAt sets the "updated_at" field. -func (_c *BackupSourceConfigCreate) SetUpdatedAt(v time.Time) *BackupSourceConfigCreate { - _c.mutation.SetUpdatedAt(v) - return _c -} - -// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. -func (_c *BackupSourceConfigCreate) SetNillableUpdatedAt(v *time.Time) *BackupSourceConfigCreate { - if v != nil { - _c.SetUpdatedAt(*v) - } - return _c -} - -// Mutation returns the BackupSourceConfigMutation object of the builder. 
-func (_c *BackupSourceConfigCreate) Mutation() *BackupSourceConfigMutation { - return _c.mutation -} - -// Save creates the BackupSourceConfig in the database. -func (_c *BackupSourceConfigCreate) Save(ctx context.Context) (*BackupSourceConfig, error) { - _c.defaults() - return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) -} - -// SaveX calls Save and panics if Save returns an error. -func (_c *BackupSourceConfigCreate) SaveX(ctx context.Context) *BackupSourceConfig { - v, err := _c.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. -func (_c *BackupSourceConfigCreate) Exec(ctx context.Context) error { - _, err := _c.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_c *BackupSourceConfigCreate) ExecX(ctx context.Context) { - if err := _c.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_c *BackupSourceConfigCreate) defaults() { - if _, ok := _c.mutation.IsActive(); !ok { - v := backupsourceconfig.DefaultIsActive - _c.mutation.SetIsActive(v) - } - if _, ok := _c.mutation.ContainerName(); !ok { - v := backupsourceconfig.DefaultContainerName - _c.mutation.SetContainerName(v) - } - if _, ok := _c.mutation.CreatedAt(); !ok { - v := backupsourceconfig.DefaultCreatedAt() - _c.mutation.SetCreatedAt(v) - } - if _, ok := _c.mutation.UpdatedAt(); !ok { - v := backupsourceconfig.DefaultUpdatedAt() - _c.mutation.SetUpdatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. 
-func (_c *BackupSourceConfigCreate) check() error { - if _, ok := _c.mutation.SourceType(); !ok { - return &ValidationError{Name: "source_type", err: errors.New(`ent: missing required field "BackupSourceConfig.source_type"`)} - } - if v, ok := _c.mutation.SourceType(); ok { - if err := backupsourceconfig.SourceTypeValidator(v); err != nil { - return &ValidationError{Name: "source_type", err: fmt.Errorf(`ent: validator failed for field "BackupSourceConfig.source_type": %w`, err)} - } - } - if _, ok := _c.mutation.ProfileID(); !ok { - return &ValidationError{Name: "profile_id", err: errors.New(`ent: missing required field "BackupSourceConfig.profile_id"`)} - } - if _, ok := _c.mutation.Name(); !ok { - return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "BackupSourceConfig.name"`)} - } - if _, ok := _c.mutation.IsActive(); !ok { - return &ValidationError{Name: "is_active", err: errors.New(`ent: missing required field "BackupSourceConfig.is_active"`)} - } - if _, ok := _c.mutation.ContainerName(); !ok { - return &ValidationError{Name: "container_name", err: errors.New(`ent: missing required field "BackupSourceConfig.container_name"`)} - } - if _, ok := _c.mutation.CreatedAt(); !ok { - return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "BackupSourceConfig.created_at"`)} - } - if _, ok := _c.mutation.UpdatedAt(); !ok { - return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "BackupSourceConfig.updated_at"`)} - } - return nil -} - -func (_c *BackupSourceConfigCreate) sqlSave(ctx context.Context) (*BackupSourceConfig, error) { - if err := _c.check(); err != nil { - return nil, err - } - _node, _spec := _c.createSpec() - if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - id := _spec.ID.Value.(int64) - _node.ID = int(id) - _c.mutation.id 
= &_node.ID - _c.mutation.done = true - return _node, nil -} - -func (_c *BackupSourceConfigCreate) createSpec() (*BackupSourceConfig, *sqlgraph.CreateSpec) { - var ( - _node = &BackupSourceConfig{config: _c.config} - _spec = sqlgraph.NewCreateSpec(backupsourceconfig.Table, sqlgraph.NewFieldSpec(backupsourceconfig.FieldID, field.TypeInt)) - ) - if value, ok := _c.mutation.SourceType(); ok { - _spec.SetField(backupsourceconfig.FieldSourceType, field.TypeEnum, value) - _node.SourceType = value - } - if value, ok := _c.mutation.ProfileID(); ok { - _spec.SetField(backupsourceconfig.FieldProfileID, field.TypeString, value) - _node.ProfileID = value - } - if value, ok := _c.mutation.Name(); ok { - _spec.SetField(backupsourceconfig.FieldName, field.TypeString, value) - _node.Name = value - } - if value, ok := _c.mutation.IsActive(); ok { - _spec.SetField(backupsourceconfig.FieldIsActive, field.TypeBool, value) - _node.IsActive = value - } - if value, ok := _c.mutation.Host(); ok { - _spec.SetField(backupsourceconfig.FieldHost, field.TypeString, value) - _node.Host = value - } - if value, ok := _c.mutation.Port(); ok { - _spec.SetField(backupsourceconfig.FieldPort, field.TypeInt, value) - _node.Port = &value - } - if value, ok := _c.mutation.Username(); ok { - _spec.SetField(backupsourceconfig.FieldUsername, field.TypeString, value) - _node.Username = value - } - if value, ok := _c.mutation.PasswordEncrypted(); ok { - _spec.SetField(backupsourceconfig.FieldPasswordEncrypted, field.TypeString, value) - _node.PasswordEncrypted = value - } - if value, ok := _c.mutation.Database(); ok { - _spec.SetField(backupsourceconfig.FieldDatabase, field.TypeString, value) - _node.Database = value - } - if value, ok := _c.mutation.SslMode(); ok { - _spec.SetField(backupsourceconfig.FieldSslMode, field.TypeString, value) - _node.SslMode = value - } - if value, ok := _c.mutation.Addr(); ok { - _spec.SetField(backupsourceconfig.FieldAddr, field.TypeString, value) - _node.Addr = value - } - 
if value, ok := _c.mutation.RedisDb(); ok { - _spec.SetField(backupsourceconfig.FieldRedisDb, field.TypeInt, value) - _node.RedisDb = &value - } - if value, ok := _c.mutation.ContainerName(); ok { - _spec.SetField(backupsourceconfig.FieldContainerName, field.TypeString, value) - _node.ContainerName = value - } - if value, ok := _c.mutation.CreatedAt(); ok { - _spec.SetField(backupsourceconfig.FieldCreatedAt, field.TypeTime, value) - _node.CreatedAt = value - } - if value, ok := _c.mutation.UpdatedAt(); ok { - _spec.SetField(backupsourceconfig.FieldUpdatedAt, field.TypeTime, value) - _node.UpdatedAt = value - } - return _node, _spec -} - -// BackupSourceConfigCreateBulk is the builder for creating many BackupSourceConfig entities in bulk. -type BackupSourceConfigCreateBulk struct { - config - err error - builders []*BackupSourceConfigCreate -} - -// Save creates the BackupSourceConfig entities in the database. -func (_c *BackupSourceConfigCreateBulk) Save(ctx context.Context) ([]*BackupSourceConfig, error) { - if _c.err != nil { - return nil, _c.err - } - specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) - nodes := make([]*BackupSourceConfig, len(_c.builders)) - mutators := make([]Mutator, len(_c.builders)) - for i := range _c.builders { - func(i int, root context.Context) { - builder := _c.builders[i] - builder.defaults() - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutation, ok := m.(*BackupSourceConfigMutation) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - if err := builder.check(); err != nil { - return nil, err - } - builder.mutation = mutation - var err error - nodes[i], specs[i] = builder.createSpec() - if i < len(mutators)-1 { - _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) - } else { - spec := &sqlgraph.BatchCreateSpec{Nodes: specs} - // Invoke the actual operation on the latest mutation in the chain. 
- if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { - if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - } - } - if err != nil { - return nil, err - } - mutation.id = &nodes[i].ID - if specs[i].ID.Value != nil { - id := specs[i].ID.Value.(int64) - nodes[i].ID = int(id) - } - mutation.done = true - return nodes[i], nil - }) - for i := len(builder.hooks) - 1; i >= 0; i-- { - mut = builder.hooks[i](mut) - } - mutators[i] = mut - }(i, ctx) - } - if len(mutators) > 0 { - if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { - return nil, err - } - } - return nodes, nil -} - -// SaveX is like Save, but panics if an error occurs. -func (_c *BackupSourceConfigCreateBulk) SaveX(ctx context.Context) []*BackupSourceConfig { - v, err := _c.Save(ctx) - if err != nil { - panic(err) - } - return v -} - -// Exec executes the query. -func (_c *BackupSourceConfigCreateBulk) Exec(ctx context.Context) error { - _, err := _c.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_c *BackupSourceConfigCreateBulk) ExecX(ctx context.Context) { - if err := _c.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/backup/ent/backupsourceconfig_delete.go b/backup/ent/backupsourceconfig_delete.go deleted file mode 100644 index 0d6d36f92..000000000 --- a/backup/ent/backupsourceconfig_delete.go +++ /dev/null @@ -1,88 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupSourceConfigDelete is the builder for deleting a BackupSourceConfig entity. 
-type BackupSourceConfigDelete struct { - config - hooks []Hook - mutation *BackupSourceConfigMutation -} - -// Where appends a list predicates to the BackupSourceConfigDelete builder. -func (_d *BackupSourceConfigDelete) Where(ps ...predicate.BackupSourceConfig) *BackupSourceConfigDelete { - _d.mutation.Where(ps...) - return _d -} - -// Exec executes the deletion query and returns how many vertices were deleted. -func (_d *BackupSourceConfigDelete) Exec(ctx context.Context) (int, error) { - return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) -} - -// ExecX is like Exec, but panics if an error occurs. -func (_d *BackupSourceConfigDelete) ExecX(ctx context.Context) int { - n, err := _d.Exec(ctx) - if err != nil { - panic(err) - } - return n -} - -func (_d *BackupSourceConfigDelete) sqlExec(ctx context.Context) (int, error) { - _spec := sqlgraph.NewDeleteSpec(backupsourceconfig.Table, sqlgraph.NewFieldSpec(backupsourceconfig.FieldID, field.TypeInt)) - if ps := _d.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) - if err != nil && sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - _d.mutation.done = true - return affected, err -} - -// BackupSourceConfigDeleteOne is the builder for deleting a single BackupSourceConfig entity. -type BackupSourceConfigDeleteOne struct { - _d *BackupSourceConfigDelete -} - -// Where appends a list predicates to the BackupSourceConfigDelete builder. -func (_d *BackupSourceConfigDeleteOne) Where(ps ...predicate.BackupSourceConfig) *BackupSourceConfigDeleteOne { - _d._d.mutation.Where(ps...) - return _d -} - -// Exec executes the deletion query. 
-func (_d *BackupSourceConfigDeleteOne) Exec(ctx context.Context) error { - n, err := _d._d.Exec(ctx) - switch { - case err != nil: - return err - case n == 0: - return &NotFoundError{backupsourceconfig.Label} - default: - return nil - } -} - -// ExecX is like Exec, but panics if an error occurs. -func (_d *BackupSourceConfigDeleteOne) ExecX(ctx context.Context) { - if err := _d.Exec(ctx); err != nil { - panic(err) - } -} diff --git a/backup/ent/backupsourceconfig_query.go b/backup/ent/backupsourceconfig_query.go deleted file mode 100644 index 3400a3fcd..000000000 --- a/backup/ent/backupsourceconfig_query.go +++ /dev/null @@ -1,527 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "fmt" - "math" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupSourceConfigQuery is the builder for querying BackupSourceConfig entities. -type BackupSourceConfigQuery struct { - config - ctx *QueryContext - order []backupsourceconfig.OrderOption - inters []Interceptor - predicates []predicate.BackupSourceConfig - // intermediate query (i.e. traversal path). - sql *sql.Selector - path func(context.Context) (*sql.Selector, error) -} - -// Where adds a new predicate for the BackupSourceConfigQuery builder. -func (_q *BackupSourceConfigQuery) Where(ps ...predicate.BackupSourceConfig) *BackupSourceConfigQuery { - _q.predicates = append(_q.predicates, ps...) - return _q -} - -// Limit the number of records to be returned by this query. -func (_q *BackupSourceConfigQuery) Limit(limit int) *BackupSourceConfigQuery { - _q.ctx.Limit = &limit - return _q -} - -// Offset to start from. 
-func (_q *BackupSourceConfigQuery) Offset(offset int) *BackupSourceConfigQuery { - _q.ctx.Offset = &offset - return _q -} - -// Unique configures the query builder to filter duplicate records on query. -// By default, unique is set to true, and can be disabled using this method. -func (_q *BackupSourceConfigQuery) Unique(unique bool) *BackupSourceConfigQuery { - _q.ctx.Unique = &unique - return _q -} - -// Order specifies how the records should be ordered. -func (_q *BackupSourceConfigQuery) Order(o ...backupsourceconfig.OrderOption) *BackupSourceConfigQuery { - _q.order = append(_q.order, o...) - return _q -} - -// First returns the first BackupSourceConfig entity from the query. -// Returns a *NotFoundError when no BackupSourceConfig was found. -func (_q *BackupSourceConfigQuery) First(ctx context.Context) (*BackupSourceConfig, error) { - nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) - if err != nil { - return nil, err - } - if len(nodes) == 0 { - return nil, &NotFoundError{backupsourceconfig.Label} - } - return nodes[0], nil -} - -// FirstX is like First, but panics if an error occurs. -func (_q *BackupSourceConfigQuery) FirstX(ctx context.Context) *BackupSourceConfig { - node, err := _q.First(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return node -} - -// FirstID returns the first BackupSourceConfig ID from the query. -// Returns a *NotFoundError when no BackupSourceConfig ID was found. -func (_q *BackupSourceConfigQuery) FirstID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { - return - } - if len(ids) == 0 { - err = &NotFoundError{backupsourceconfig.Label} - return - } - return ids[0], nil -} - -// FirstIDX is like FirstID, but panics if an error occurs. 
-func (_q *BackupSourceConfigQuery) FirstIDX(ctx context.Context) int { - id, err := _q.FirstID(ctx) - if err != nil && !IsNotFound(err) { - panic(err) - } - return id -} - -// Only returns a single BackupSourceConfig entity found by the query, ensuring it only returns one. -// Returns a *NotSingularError when more than one BackupSourceConfig entity is found. -// Returns a *NotFoundError when no BackupSourceConfig entities are found. -func (_q *BackupSourceConfigQuery) Only(ctx context.Context) (*BackupSourceConfig, error) { - nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) - if err != nil { - return nil, err - } - switch len(nodes) { - case 1: - return nodes[0], nil - case 0: - return nil, &NotFoundError{backupsourceconfig.Label} - default: - return nil, &NotSingularError{backupsourceconfig.Label} - } -} - -// OnlyX is like Only, but panics if an error occurs. -func (_q *BackupSourceConfigQuery) OnlyX(ctx context.Context) *BackupSourceConfig { - node, err := _q.Only(ctx) - if err != nil { - panic(err) - } - return node -} - -// OnlyID is like Only, but returns the only BackupSourceConfig ID in the query. -// Returns a *NotSingularError when more than one BackupSourceConfig ID is found. -// Returns a *NotFoundError when no entities are found. -func (_q *BackupSourceConfigQuery) OnlyID(ctx context.Context) (id int, err error) { - var ids []int - if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { - return - } - switch len(ids) { - case 1: - id = ids[0] - case 0: - err = &NotFoundError{backupsourceconfig.Label} - default: - err = &NotSingularError{backupsourceconfig.Label} - } - return -} - -// OnlyIDX is like OnlyID, but panics if an error occurs. -func (_q *BackupSourceConfigQuery) OnlyIDX(ctx context.Context) int { - id, err := _q.OnlyID(ctx) - if err != nil { - panic(err) - } - return id -} - -// All executes the query and returns a list of BackupSourceConfigs. 
-func (_q *BackupSourceConfigQuery) All(ctx context.Context) ([]*BackupSourceConfig, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) - if err := _q.prepareQuery(ctx); err != nil { - return nil, err - } - qr := querierAll[[]*BackupSourceConfig, *BackupSourceConfigQuery]() - return withInterceptors[[]*BackupSourceConfig](ctx, _q, qr, _q.inters) -} - -// AllX is like All, but panics if an error occurs. -func (_q *BackupSourceConfigQuery) AllX(ctx context.Context) []*BackupSourceConfig { - nodes, err := _q.All(ctx) - if err != nil { - panic(err) - } - return nodes -} - -// IDs executes the query and returns a list of BackupSourceConfig IDs. -func (_q *BackupSourceConfigQuery) IDs(ctx context.Context) (ids []int, err error) { - if _q.ctx.Unique == nil && _q.path != nil { - _q.Unique(true) - } - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) - if err = _q.Select(backupsourceconfig.FieldID).Scan(ctx, &ids); err != nil { - return nil, err - } - return ids, nil -} - -// IDsX is like IDs, but panics if an error occurs. -func (_q *BackupSourceConfigQuery) IDsX(ctx context.Context) []int { - ids, err := _q.IDs(ctx) - if err != nil { - panic(err) - } - return ids -} - -// Count returns the count of the given query. -func (_q *BackupSourceConfigQuery) Count(ctx context.Context) (int, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) - if err := _q.prepareQuery(ctx); err != nil { - return 0, err - } - return withInterceptors[int](ctx, _q, querierCount[*BackupSourceConfigQuery](), _q.inters) -} - -// CountX is like Count, but panics if an error occurs. -func (_q *BackupSourceConfigQuery) CountX(ctx context.Context) int { - count, err := _q.Count(ctx) - if err != nil { - panic(err) - } - return count -} - -// Exist returns true if the query has elements in the graph. 
-func (_q *BackupSourceConfigQuery) Exist(ctx context.Context) (bool, error) { - ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) - switch _, err := _q.FirstID(ctx); { - case IsNotFound(err): - return false, nil - case err != nil: - return false, fmt.Errorf("ent: check existence: %w", err) - default: - return true, nil - } -} - -// ExistX is like Exist, but panics if an error occurs. -func (_q *BackupSourceConfigQuery) ExistX(ctx context.Context) bool { - exist, err := _q.Exist(ctx) - if err != nil { - panic(err) - } - return exist -} - -// Clone returns a duplicate of the BackupSourceConfigQuery builder, including all associated steps. It can be -// used to prepare common query builders and use them differently after the clone is made. -func (_q *BackupSourceConfigQuery) Clone() *BackupSourceConfigQuery { - if _q == nil { - return nil - } - return &BackupSourceConfigQuery{ - config: _q.config, - ctx: _q.ctx.Clone(), - order: append([]backupsourceconfig.OrderOption{}, _q.order...), - inters: append([]Interceptor{}, _q.inters...), - predicates: append([]predicate.BackupSourceConfig{}, _q.predicates...), - // clone intermediate query. - sql: _q.sql.Clone(), - path: _q.path, - } -} - -// GroupBy is used to group vertices by one or more fields/columns. -// It is often used with aggregate functions, like: count, max, mean, min, sum. -// -// Example: -// -// var v []struct { -// SourceType backupsourceconfig.SourceType `json:"source_type,omitempty"` -// Count int `json:"count,omitempty"` -// } -// -// client.BackupSourceConfig.Query(). -// GroupBy(backupsourceconfig.FieldSourceType). -// Aggregate(ent.Count()). -// Scan(ctx, &v) -func (_q *BackupSourceConfigQuery) GroupBy(field string, fields ...string) *BackupSourceConfigGroupBy { - _q.ctx.Fields = append([]string{field}, fields...) 
- grbuild := &BackupSourceConfigGroupBy{build: _q} - grbuild.flds = &_q.ctx.Fields - grbuild.label = backupsourceconfig.Label - grbuild.scan = grbuild.Scan - return grbuild -} - -// Select allows the selection one or more fields/columns for the given query, -// instead of selecting all fields in the entity. -// -// Example: -// -// var v []struct { -// SourceType backupsourceconfig.SourceType `json:"source_type,omitempty"` -// } -// -// client.BackupSourceConfig.Query(). -// Select(backupsourceconfig.FieldSourceType). -// Scan(ctx, &v) -func (_q *BackupSourceConfigQuery) Select(fields ...string) *BackupSourceConfigSelect { - _q.ctx.Fields = append(_q.ctx.Fields, fields...) - sbuild := &BackupSourceConfigSelect{BackupSourceConfigQuery: _q} - sbuild.label = backupsourceconfig.Label - sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan - return sbuild -} - -// Aggregate returns a BackupSourceConfigSelect configured with the given aggregations. -func (_q *BackupSourceConfigQuery) Aggregate(fns ...AggregateFunc) *BackupSourceConfigSelect { - return _q.Select().Aggregate(fns...) 
-} - -func (_q *BackupSourceConfigQuery) prepareQuery(ctx context.Context) error { - for _, inter := range _q.inters { - if inter == nil { - return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") - } - if trv, ok := inter.(Traverser); ok { - if err := trv.Traverse(ctx, _q); err != nil { - return err - } - } - } - for _, f := range _q.ctx.Fields { - if !backupsourceconfig.ValidColumn(f) { - return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} - } - } - if _q.path != nil { - prev, err := _q.path(ctx) - if err != nil { - return err - } - _q.sql = prev - } - return nil -} - -func (_q *BackupSourceConfigQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*BackupSourceConfig, error) { - var ( - nodes = []*BackupSourceConfig{} - _spec = _q.querySpec() - ) - _spec.ScanValues = func(columns []string) ([]any, error) { - return (*BackupSourceConfig).scanValues(nil, columns) - } - _spec.Assign = func(columns []string, values []any) error { - node := &BackupSourceConfig{config: _q.config} - nodes = append(nodes, node) - return node.assignValues(columns, values) - } - for i := range hooks { - hooks[i](ctx, _spec) - } - if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { - return nil, err - } - if len(nodes) == 0 { - return nodes, nil - } - return nodes, nil -} - -func (_q *BackupSourceConfigQuery) sqlCount(ctx context.Context) (int, error) { - _spec := _q.querySpec() - _spec.Node.Columns = _q.ctx.Fields - if len(_q.ctx.Fields) > 0 { - _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique - } - return sqlgraph.CountNodes(ctx, _q.driver, _spec) -} - -func (_q *BackupSourceConfigQuery) querySpec() *sqlgraph.QuerySpec { - _spec := sqlgraph.NewQuerySpec(backupsourceconfig.Table, backupsourceconfig.Columns, sqlgraph.NewFieldSpec(backupsourceconfig.FieldID, field.TypeInt)) - _spec.From = _q.sql - if unique := _q.ctx.Unique; unique != nil { - _spec.Unique = *unique - } else if _q.path != nil { - 
_spec.Unique = true - } - if fields := _q.ctx.Fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, backupsourceconfig.FieldID) - for i := range fields { - if fields[i] != backupsourceconfig.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) - } - } - } - if ps := _q.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if limit := _q.ctx.Limit; limit != nil { - _spec.Limit = *limit - } - if offset := _q.ctx.Offset; offset != nil { - _spec.Offset = *offset - } - if ps := _q.order; len(ps) > 0 { - _spec.Order = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - return _spec -} - -func (_q *BackupSourceConfigQuery) sqlQuery(ctx context.Context) *sql.Selector { - builder := sql.Dialect(_q.driver.Dialect()) - t1 := builder.Table(backupsourceconfig.Table) - columns := _q.ctx.Fields - if len(columns) == 0 { - columns = backupsourceconfig.Columns - } - selector := builder.Select(t1.Columns(columns...)...).From(t1) - if _q.sql != nil { - selector = _q.sql - selector.Select(selector.Columns(columns...)...) - } - if _q.ctx.Unique != nil && *_q.ctx.Unique { - selector.Distinct() - } - for _, p := range _q.predicates { - p(selector) - } - for _, p := range _q.order { - p(selector) - } - if offset := _q.ctx.Offset; offset != nil { - // limit is mandatory for offset clause. We start - // with default value, and override it below if needed. - selector.Offset(*offset).Limit(math.MaxInt32) - } - if limit := _q.ctx.Limit; limit != nil { - selector.Limit(*limit) - } - return selector -} - -// BackupSourceConfigGroupBy is the group-by builder for BackupSourceConfig entities. -type BackupSourceConfigGroupBy struct { - selector - build *BackupSourceConfigQuery -} - -// Aggregate adds the given aggregation functions to the group-by query. 
-func (_g *BackupSourceConfigGroupBy) Aggregate(fns ...AggregateFunc) *BackupSourceConfigGroupBy { - _g.fns = append(_g.fns, fns...) - return _g -} - -// Scan applies the selector query and scans the result into the given value. -func (_g *BackupSourceConfigGroupBy) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) - if err := _g.build.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*BackupSourceConfigQuery, *BackupSourceConfigGroupBy](ctx, _g.build, _g, _g.build.inters, v) -} - -func (_g *BackupSourceConfigGroupBy) sqlScan(ctx context.Context, root *BackupSourceConfigQuery, v any) error { - selector := root.sqlQuery(ctx).Select() - aggregation := make([]string, 0, len(_g.fns)) - for _, fn := range _g.fns { - aggregation = append(aggregation, fn(selector)) - } - if len(selector.SelectedColumns()) == 0 { - columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) - for _, f := range *_g.flds { - columns = append(columns, selector.C(f)) - } - columns = append(columns, aggregation...) - selector.Select(columns...) - } - selector.GroupBy(selector.Columns(*_g.flds...)...) - if err := selector.Err(); err != nil { - return err - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} - -// BackupSourceConfigSelect is the builder for selecting fields of BackupSourceConfig entities. -type BackupSourceConfigSelect struct { - *BackupSourceConfigQuery - selector -} - -// Aggregate adds the given aggregation functions to the selector query. -func (_s *BackupSourceConfigSelect) Aggregate(fns ...AggregateFunc) *BackupSourceConfigSelect { - _s.fns = append(_s.fns, fns...) - return _s -} - -// Scan applies the selector query and scans the result into the given value. 
-func (_s *BackupSourceConfigSelect) Scan(ctx context.Context, v any) error { - ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) - if err := _s.prepareQuery(ctx); err != nil { - return err - } - return scanWithInterceptors[*BackupSourceConfigQuery, *BackupSourceConfigSelect](ctx, _s.BackupSourceConfigQuery, _s, _s.inters, v) -} - -func (_s *BackupSourceConfigSelect) sqlScan(ctx context.Context, root *BackupSourceConfigQuery, v any) error { - selector := root.sqlQuery(ctx) - aggregation := make([]string, 0, len(_s.fns)) - for _, fn := range _s.fns { - aggregation = append(aggregation, fn(selector)) - } - switch n := len(*_s.selector.flds); { - case n == 0 && len(aggregation) > 0: - selector.Select(aggregation...) - case n != 0 && len(aggregation) > 0: - selector.AppendSelect(aggregation...) - } - rows := &sql.Rows{} - query, args := selector.Query() - if err := _s.driver.Query(ctx, query, args, rows); err != nil { - return err - } - defer rows.Close() - return sql.ScanSlice(rows, v) -} diff --git a/backup/ent/backupsourceconfig_update.go b/backup/ent/backupsourceconfig_update.go deleted file mode 100644 index 4424cc485..000000000 --- a/backup/ent/backupsourceconfig_update.go +++ /dev/null @@ -1,864 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "time" - - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "entgo.io/ent/schema/field" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -// BackupSourceConfigUpdate is the builder for updating BackupSourceConfig entities. -type BackupSourceConfigUpdate struct { - config - hooks []Hook - mutation *BackupSourceConfigMutation -} - -// Where appends a list predicates to the BackupSourceConfigUpdate builder. -func (_u *BackupSourceConfigUpdate) Where(ps ...predicate.BackupSourceConfig) *BackupSourceConfigUpdate { - _u.mutation.Where(ps...) 
- return _u -} - -// SetSourceType sets the "source_type" field. -func (_u *BackupSourceConfigUpdate) SetSourceType(v backupsourceconfig.SourceType) *BackupSourceConfigUpdate { - _u.mutation.SetSourceType(v) - return _u -} - -// SetNillableSourceType sets the "source_type" field if the given value is not nil. -func (_u *BackupSourceConfigUpdate) SetNillableSourceType(v *backupsourceconfig.SourceType) *BackupSourceConfigUpdate { - if v != nil { - _u.SetSourceType(*v) - } - return _u -} - -// SetProfileID sets the "profile_id" field. -func (_u *BackupSourceConfigUpdate) SetProfileID(v string) *BackupSourceConfigUpdate { - _u.mutation.SetProfileID(v) - return _u -} - -// SetNillableProfileID sets the "profile_id" field if the given value is not nil. -func (_u *BackupSourceConfigUpdate) SetNillableProfileID(v *string) *BackupSourceConfigUpdate { - if v != nil { - _u.SetProfileID(*v) - } - return _u -} - -// SetName sets the "name" field. -func (_u *BackupSourceConfigUpdate) SetName(v string) *BackupSourceConfigUpdate { - _u.mutation.SetName(v) - return _u -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (_u *BackupSourceConfigUpdate) SetNillableName(v *string) *BackupSourceConfigUpdate { - if v != nil { - _u.SetName(*v) - } - return _u -} - -// SetIsActive sets the "is_active" field. -func (_u *BackupSourceConfigUpdate) SetIsActive(v bool) *BackupSourceConfigUpdate { - _u.mutation.SetIsActive(v) - return _u -} - -// SetNillableIsActive sets the "is_active" field if the given value is not nil. -func (_u *BackupSourceConfigUpdate) SetNillableIsActive(v *bool) *BackupSourceConfigUpdate { - if v != nil { - _u.SetIsActive(*v) - } - return _u -} - -// SetHost sets the "host" field. -func (_u *BackupSourceConfigUpdate) SetHost(v string) *BackupSourceConfigUpdate { - _u.mutation.SetHost(v) - return _u -} - -// SetNillableHost sets the "host" field if the given value is not nil. 
-func (_u *BackupSourceConfigUpdate) SetNillableHost(v *string) *BackupSourceConfigUpdate { - if v != nil { - _u.SetHost(*v) - } - return _u -} - -// ClearHost clears the value of the "host" field. -func (_u *BackupSourceConfigUpdate) ClearHost() *BackupSourceConfigUpdate { - _u.mutation.ClearHost() - return _u -} - -// SetPort sets the "port" field. -func (_u *BackupSourceConfigUpdate) SetPort(v int) *BackupSourceConfigUpdate { - _u.mutation.ResetPort() - _u.mutation.SetPort(v) - return _u -} - -// SetNillablePort sets the "port" field if the given value is not nil. -func (_u *BackupSourceConfigUpdate) SetNillablePort(v *int) *BackupSourceConfigUpdate { - if v != nil { - _u.SetPort(*v) - } - return _u -} - -// AddPort adds value to the "port" field. -func (_u *BackupSourceConfigUpdate) AddPort(v int) *BackupSourceConfigUpdate { - _u.mutation.AddPort(v) - return _u -} - -// ClearPort clears the value of the "port" field. -func (_u *BackupSourceConfigUpdate) ClearPort() *BackupSourceConfigUpdate { - _u.mutation.ClearPort() - return _u -} - -// SetUsername sets the "username" field. -func (_u *BackupSourceConfigUpdate) SetUsername(v string) *BackupSourceConfigUpdate { - _u.mutation.SetUsername(v) - return _u -} - -// SetNillableUsername sets the "username" field if the given value is not nil. -func (_u *BackupSourceConfigUpdate) SetNillableUsername(v *string) *BackupSourceConfigUpdate { - if v != nil { - _u.SetUsername(*v) - } - return _u -} - -// ClearUsername clears the value of the "username" field. -func (_u *BackupSourceConfigUpdate) ClearUsername() *BackupSourceConfigUpdate { - _u.mutation.ClearUsername() - return _u -} - -// SetPasswordEncrypted sets the "password_encrypted" field. -func (_u *BackupSourceConfigUpdate) SetPasswordEncrypted(v string) *BackupSourceConfigUpdate { - _u.mutation.SetPasswordEncrypted(v) - return _u -} - -// SetNillablePasswordEncrypted sets the "password_encrypted" field if the given value is not nil. 
-func (_u *BackupSourceConfigUpdate) SetNillablePasswordEncrypted(v *string) *BackupSourceConfigUpdate { - if v != nil { - _u.SetPasswordEncrypted(*v) - } - return _u -} - -// ClearPasswordEncrypted clears the value of the "password_encrypted" field. -func (_u *BackupSourceConfigUpdate) ClearPasswordEncrypted() *BackupSourceConfigUpdate { - _u.mutation.ClearPasswordEncrypted() - return _u -} - -// SetDatabase sets the "database" field. -func (_u *BackupSourceConfigUpdate) SetDatabase(v string) *BackupSourceConfigUpdate { - _u.mutation.SetDatabase(v) - return _u -} - -// SetNillableDatabase sets the "database" field if the given value is not nil. -func (_u *BackupSourceConfigUpdate) SetNillableDatabase(v *string) *BackupSourceConfigUpdate { - if v != nil { - _u.SetDatabase(*v) - } - return _u -} - -// ClearDatabase clears the value of the "database" field. -func (_u *BackupSourceConfigUpdate) ClearDatabase() *BackupSourceConfigUpdate { - _u.mutation.ClearDatabase() - return _u -} - -// SetSslMode sets the "ssl_mode" field. -func (_u *BackupSourceConfigUpdate) SetSslMode(v string) *BackupSourceConfigUpdate { - _u.mutation.SetSslMode(v) - return _u -} - -// SetNillableSslMode sets the "ssl_mode" field if the given value is not nil. -func (_u *BackupSourceConfigUpdate) SetNillableSslMode(v *string) *BackupSourceConfigUpdate { - if v != nil { - _u.SetSslMode(*v) - } - return _u -} - -// ClearSslMode clears the value of the "ssl_mode" field. -func (_u *BackupSourceConfigUpdate) ClearSslMode() *BackupSourceConfigUpdate { - _u.mutation.ClearSslMode() - return _u -} - -// SetAddr sets the "addr" field. -func (_u *BackupSourceConfigUpdate) SetAddr(v string) *BackupSourceConfigUpdate { - _u.mutation.SetAddr(v) - return _u -} - -// SetNillableAddr sets the "addr" field if the given value is not nil. 
-func (_u *BackupSourceConfigUpdate) SetNillableAddr(v *string) *BackupSourceConfigUpdate { - if v != nil { - _u.SetAddr(*v) - } - return _u -} - -// ClearAddr clears the value of the "addr" field. -func (_u *BackupSourceConfigUpdate) ClearAddr() *BackupSourceConfigUpdate { - _u.mutation.ClearAddr() - return _u -} - -// SetRedisDb sets the "redis_db" field. -func (_u *BackupSourceConfigUpdate) SetRedisDb(v int) *BackupSourceConfigUpdate { - _u.mutation.ResetRedisDb() - _u.mutation.SetRedisDb(v) - return _u -} - -// SetNillableRedisDb sets the "redis_db" field if the given value is not nil. -func (_u *BackupSourceConfigUpdate) SetNillableRedisDb(v *int) *BackupSourceConfigUpdate { - if v != nil { - _u.SetRedisDb(*v) - } - return _u -} - -// AddRedisDb adds value to the "redis_db" field. -func (_u *BackupSourceConfigUpdate) AddRedisDb(v int) *BackupSourceConfigUpdate { - _u.mutation.AddRedisDb(v) - return _u -} - -// ClearRedisDb clears the value of the "redis_db" field. -func (_u *BackupSourceConfigUpdate) ClearRedisDb() *BackupSourceConfigUpdate { - _u.mutation.ClearRedisDb() - return _u -} - -// SetContainerName sets the "container_name" field. -func (_u *BackupSourceConfigUpdate) SetContainerName(v string) *BackupSourceConfigUpdate { - _u.mutation.SetContainerName(v) - return _u -} - -// SetNillableContainerName sets the "container_name" field if the given value is not nil. -func (_u *BackupSourceConfigUpdate) SetNillableContainerName(v *string) *BackupSourceConfigUpdate { - if v != nil { - _u.SetContainerName(*v) - } - return _u -} - -// SetUpdatedAt sets the "updated_at" field. -func (_u *BackupSourceConfigUpdate) SetUpdatedAt(v time.Time) *BackupSourceConfigUpdate { - _u.mutation.SetUpdatedAt(v) - return _u -} - -// Mutation returns the BackupSourceConfigMutation object of the builder. 
-func (_u *BackupSourceConfigUpdate) Mutation() *BackupSourceConfigMutation { - return _u.mutation -} - -// Save executes the query and returns the number of nodes affected by the update operation. -func (_u *BackupSourceConfigUpdate) Save(ctx context.Context) (int, error) { - _u.defaults() - return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (_u *BackupSourceConfigUpdate) SaveX(ctx context.Context) int { - affected, err := _u.Save(ctx) - if err != nil { - panic(err) - } - return affected -} - -// Exec executes the query. -func (_u *BackupSourceConfigUpdate) Exec(ctx context.Context) error { - _, err := _u.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_u *BackupSourceConfigUpdate) ExecX(ctx context.Context) { - if err := _u.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. -func (_u *BackupSourceConfigUpdate) defaults() { - if _, ok := _u.mutation.UpdatedAt(); !ok { - v := backupsourceconfig.UpdateDefaultUpdatedAt() - _u.mutation.SetUpdatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. 
-func (_u *BackupSourceConfigUpdate) check() error { - if v, ok := _u.mutation.SourceType(); ok { - if err := backupsourceconfig.SourceTypeValidator(v); err != nil { - return &ValidationError{Name: "source_type", err: fmt.Errorf(`ent: validator failed for field "BackupSourceConfig.source_type": %w`, err)} - } - } - return nil -} - -func (_u *BackupSourceConfigUpdate) sqlSave(ctx context.Context) (_node int, err error) { - if err := _u.check(); err != nil { - return _node, err - } - _spec := sqlgraph.NewUpdateSpec(backupsourceconfig.Table, backupsourceconfig.Columns, sqlgraph.NewFieldSpec(backupsourceconfig.FieldID, field.TypeInt)) - if ps := _u.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := _u.mutation.SourceType(); ok { - _spec.SetField(backupsourceconfig.FieldSourceType, field.TypeEnum, value) - } - if value, ok := _u.mutation.ProfileID(); ok { - _spec.SetField(backupsourceconfig.FieldProfileID, field.TypeString, value) - } - if value, ok := _u.mutation.Name(); ok { - _spec.SetField(backupsourceconfig.FieldName, field.TypeString, value) - } - if value, ok := _u.mutation.IsActive(); ok { - _spec.SetField(backupsourceconfig.FieldIsActive, field.TypeBool, value) - } - if value, ok := _u.mutation.Host(); ok { - _spec.SetField(backupsourceconfig.FieldHost, field.TypeString, value) - } - if _u.mutation.HostCleared() { - _spec.ClearField(backupsourceconfig.FieldHost, field.TypeString) - } - if value, ok := _u.mutation.Port(); ok { - _spec.SetField(backupsourceconfig.FieldPort, field.TypeInt, value) - } - if value, ok := _u.mutation.AddedPort(); ok { - _spec.AddField(backupsourceconfig.FieldPort, field.TypeInt, value) - } - if _u.mutation.PortCleared() { - _spec.ClearField(backupsourceconfig.FieldPort, field.TypeInt) - } - if value, ok := _u.mutation.Username(); ok { - _spec.SetField(backupsourceconfig.FieldUsername, field.TypeString, value) - } - if 
_u.mutation.UsernameCleared() { - _spec.ClearField(backupsourceconfig.FieldUsername, field.TypeString) - } - if value, ok := _u.mutation.PasswordEncrypted(); ok { - _spec.SetField(backupsourceconfig.FieldPasswordEncrypted, field.TypeString, value) - } - if _u.mutation.PasswordEncryptedCleared() { - _spec.ClearField(backupsourceconfig.FieldPasswordEncrypted, field.TypeString) - } - if value, ok := _u.mutation.Database(); ok { - _spec.SetField(backupsourceconfig.FieldDatabase, field.TypeString, value) - } - if _u.mutation.DatabaseCleared() { - _spec.ClearField(backupsourceconfig.FieldDatabase, field.TypeString) - } - if value, ok := _u.mutation.SslMode(); ok { - _spec.SetField(backupsourceconfig.FieldSslMode, field.TypeString, value) - } - if _u.mutation.SslModeCleared() { - _spec.ClearField(backupsourceconfig.FieldSslMode, field.TypeString) - } - if value, ok := _u.mutation.Addr(); ok { - _spec.SetField(backupsourceconfig.FieldAddr, field.TypeString, value) - } - if _u.mutation.AddrCleared() { - _spec.ClearField(backupsourceconfig.FieldAddr, field.TypeString) - } - if value, ok := _u.mutation.RedisDb(); ok { - _spec.SetField(backupsourceconfig.FieldRedisDb, field.TypeInt, value) - } - if value, ok := _u.mutation.AddedRedisDb(); ok { - _spec.AddField(backupsourceconfig.FieldRedisDb, field.TypeInt, value) - } - if _u.mutation.RedisDbCleared() { - _spec.ClearField(backupsourceconfig.FieldRedisDb, field.TypeInt) - } - if value, ok := _u.mutation.ContainerName(); ok { - _spec.SetField(backupsourceconfig.FieldContainerName, field.TypeString, value) - } - if value, ok := _u.mutation.UpdatedAt(); ok { - _spec.SetField(backupsourceconfig.FieldUpdatedAt, field.TypeTime, value) - } - if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{backupsourceconfig.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return 0, err - } - 
_u.mutation.done = true - return _node, nil -} - -// BackupSourceConfigUpdateOne is the builder for updating a single BackupSourceConfig entity. -type BackupSourceConfigUpdateOne struct { - config - fields []string - hooks []Hook - mutation *BackupSourceConfigMutation -} - -// SetSourceType sets the "source_type" field. -func (_u *BackupSourceConfigUpdateOne) SetSourceType(v backupsourceconfig.SourceType) *BackupSourceConfigUpdateOne { - _u.mutation.SetSourceType(v) - return _u -} - -// SetNillableSourceType sets the "source_type" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillableSourceType(v *backupsourceconfig.SourceType) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetSourceType(*v) - } - return _u -} - -// SetProfileID sets the "profile_id" field. -func (_u *BackupSourceConfigUpdateOne) SetProfileID(v string) *BackupSourceConfigUpdateOne { - _u.mutation.SetProfileID(v) - return _u -} - -// SetNillableProfileID sets the "profile_id" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillableProfileID(v *string) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetProfileID(*v) - } - return _u -} - -// SetName sets the "name" field. -func (_u *BackupSourceConfigUpdateOne) SetName(v string) *BackupSourceConfigUpdateOne { - _u.mutation.SetName(v) - return _u -} - -// SetNillableName sets the "name" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillableName(v *string) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetName(*v) - } - return _u -} - -// SetIsActive sets the "is_active" field. -func (_u *BackupSourceConfigUpdateOne) SetIsActive(v bool) *BackupSourceConfigUpdateOne { - _u.mutation.SetIsActive(v) - return _u -} - -// SetNillableIsActive sets the "is_active" field if the given value is not nil. 
-func (_u *BackupSourceConfigUpdateOne) SetNillableIsActive(v *bool) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetIsActive(*v) - } - return _u -} - -// SetHost sets the "host" field. -func (_u *BackupSourceConfigUpdateOne) SetHost(v string) *BackupSourceConfigUpdateOne { - _u.mutation.SetHost(v) - return _u -} - -// SetNillableHost sets the "host" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillableHost(v *string) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetHost(*v) - } - return _u -} - -// ClearHost clears the value of the "host" field. -func (_u *BackupSourceConfigUpdateOne) ClearHost() *BackupSourceConfigUpdateOne { - _u.mutation.ClearHost() - return _u -} - -// SetPort sets the "port" field. -func (_u *BackupSourceConfigUpdateOne) SetPort(v int) *BackupSourceConfigUpdateOne { - _u.mutation.ResetPort() - _u.mutation.SetPort(v) - return _u -} - -// SetNillablePort sets the "port" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillablePort(v *int) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetPort(*v) - } - return _u -} - -// AddPort adds value to the "port" field. -func (_u *BackupSourceConfigUpdateOne) AddPort(v int) *BackupSourceConfigUpdateOne { - _u.mutation.AddPort(v) - return _u -} - -// ClearPort clears the value of the "port" field. -func (_u *BackupSourceConfigUpdateOne) ClearPort() *BackupSourceConfigUpdateOne { - _u.mutation.ClearPort() - return _u -} - -// SetUsername sets the "username" field. -func (_u *BackupSourceConfigUpdateOne) SetUsername(v string) *BackupSourceConfigUpdateOne { - _u.mutation.SetUsername(v) - return _u -} - -// SetNillableUsername sets the "username" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillableUsername(v *string) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetUsername(*v) - } - return _u -} - -// ClearUsername clears the value of the "username" field. 
-func (_u *BackupSourceConfigUpdateOne) ClearUsername() *BackupSourceConfigUpdateOne { - _u.mutation.ClearUsername() - return _u -} - -// SetPasswordEncrypted sets the "password_encrypted" field. -func (_u *BackupSourceConfigUpdateOne) SetPasswordEncrypted(v string) *BackupSourceConfigUpdateOne { - _u.mutation.SetPasswordEncrypted(v) - return _u -} - -// SetNillablePasswordEncrypted sets the "password_encrypted" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillablePasswordEncrypted(v *string) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetPasswordEncrypted(*v) - } - return _u -} - -// ClearPasswordEncrypted clears the value of the "password_encrypted" field. -func (_u *BackupSourceConfigUpdateOne) ClearPasswordEncrypted() *BackupSourceConfigUpdateOne { - _u.mutation.ClearPasswordEncrypted() - return _u -} - -// SetDatabase sets the "database" field. -func (_u *BackupSourceConfigUpdateOne) SetDatabase(v string) *BackupSourceConfigUpdateOne { - _u.mutation.SetDatabase(v) - return _u -} - -// SetNillableDatabase sets the "database" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillableDatabase(v *string) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetDatabase(*v) - } - return _u -} - -// ClearDatabase clears the value of the "database" field. -func (_u *BackupSourceConfigUpdateOne) ClearDatabase() *BackupSourceConfigUpdateOne { - _u.mutation.ClearDatabase() - return _u -} - -// SetSslMode sets the "ssl_mode" field. -func (_u *BackupSourceConfigUpdateOne) SetSslMode(v string) *BackupSourceConfigUpdateOne { - _u.mutation.SetSslMode(v) - return _u -} - -// SetNillableSslMode sets the "ssl_mode" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillableSslMode(v *string) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetSslMode(*v) - } - return _u -} - -// ClearSslMode clears the value of the "ssl_mode" field. 
-func (_u *BackupSourceConfigUpdateOne) ClearSslMode() *BackupSourceConfigUpdateOne { - _u.mutation.ClearSslMode() - return _u -} - -// SetAddr sets the "addr" field. -func (_u *BackupSourceConfigUpdateOne) SetAddr(v string) *BackupSourceConfigUpdateOne { - _u.mutation.SetAddr(v) - return _u -} - -// SetNillableAddr sets the "addr" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillableAddr(v *string) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetAddr(*v) - } - return _u -} - -// ClearAddr clears the value of the "addr" field. -func (_u *BackupSourceConfigUpdateOne) ClearAddr() *BackupSourceConfigUpdateOne { - _u.mutation.ClearAddr() - return _u -} - -// SetRedisDb sets the "redis_db" field. -func (_u *BackupSourceConfigUpdateOne) SetRedisDb(v int) *BackupSourceConfigUpdateOne { - _u.mutation.ResetRedisDb() - _u.mutation.SetRedisDb(v) - return _u -} - -// SetNillableRedisDb sets the "redis_db" field if the given value is not nil. -func (_u *BackupSourceConfigUpdateOne) SetNillableRedisDb(v *int) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetRedisDb(*v) - } - return _u -} - -// AddRedisDb adds value to the "redis_db" field. -func (_u *BackupSourceConfigUpdateOne) AddRedisDb(v int) *BackupSourceConfigUpdateOne { - _u.mutation.AddRedisDb(v) - return _u -} - -// ClearRedisDb clears the value of the "redis_db" field. -func (_u *BackupSourceConfigUpdateOne) ClearRedisDb() *BackupSourceConfigUpdateOne { - _u.mutation.ClearRedisDb() - return _u -} - -// SetContainerName sets the "container_name" field. -func (_u *BackupSourceConfigUpdateOne) SetContainerName(v string) *BackupSourceConfigUpdateOne { - _u.mutation.SetContainerName(v) - return _u -} - -// SetNillableContainerName sets the "container_name" field if the given value is not nil. 
-func (_u *BackupSourceConfigUpdateOne) SetNillableContainerName(v *string) *BackupSourceConfigUpdateOne { - if v != nil { - _u.SetContainerName(*v) - } - return _u -} - -// SetUpdatedAt sets the "updated_at" field. -func (_u *BackupSourceConfigUpdateOne) SetUpdatedAt(v time.Time) *BackupSourceConfigUpdateOne { - _u.mutation.SetUpdatedAt(v) - return _u -} - -// Mutation returns the BackupSourceConfigMutation object of the builder. -func (_u *BackupSourceConfigUpdateOne) Mutation() *BackupSourceConfigMutation { - return _u.mutation -} - -// Where appends a list predicates to the BackupSourceConfigUpdate builder. -func (_u *BackupSourceConfigUpdateOne) Where(ps ...predicate.BackupSourceConfig) *BackupSourceConfigUpdateOne { - _u.mutation.Where(ps...) - return _u -} - -// Select allows selecting one or more fields (columns) of the returned entity. -// The default is selecting all fields defined in the entity schema. -func (_u *BackupSourceConfigUpdateOne) Select(field string, fields ...string) *BackupSourceConfigUpdateOne { - _u.fields = append([]string{field}, fields...) - return _u -} - -// Save executes the query and returns the updated BackupSourceConfig entity. -func (_u *BackupSourceConfigUpdateOne) Save(ctx context.Context) (*BackupSourceConfig, error) { - _u.defaults() - return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) -} - -// SaveX is like Save, but panics if an error occurs. -func (_u *BackupSourceConfigUpdateOne) SaveX(ctx context.Context) *BackupSourceConfig { - node, err := _u.Save(ctx) - if err != nil { - panic(err) - } - return node -} - -// Exec executes the query on the entity. -func (_u *BackupSourceConfigUpdateOne) Exec(ctx context.Context) error { - _, err := _u.Save(ctx) - return err -} - -// ExecX is like Exec, but panics if an error occurs. -func (_u *BackupSourceConfigUpdateOne) ExecX(ctx context.Context) { - if err := _u.Exec(ctx); err != nil { - panic(err) - } -} - -// defaults sets the default values of the builder before save. 
-func (_u *BackupSourceConfigUpdateOne) defaults() { - if _, ok := _u.mutation.UpdatedAt(); !ok { - v := backupsourceconfig.UpdateDefaultUpdatedAt() - _u.mutation.SetUpdatedAt(v) - } -} - -// check runs all checks and user-defined validators on the builder. -func (_u *BackupSourceConfigUpdateOne) check() error { - if v, ok := _u.mutation.SourceType(); ok { - if err := backupsourceconfig.SourceTypeValidator(v); err != nil { - return &ValidationError{Name: "source_type", err: fmt.Errorf(`ent: validator failed for field "BackupSourceConfig.source_type": %w`, err)} - } - } - return nil -} - -func (_u *BackupSourceConfigUpdateOne) sqlSave(ctx context.Context) (_node *BackupSourceConfig, err error) { - if err := _u.check(); err != nil { - return _node, err - } - _spec := sqlgraph.NewUpdateSpec(backupsourceconfig.Table, backupsourceconfig.Columns, sqlgraph.NewFieldSpec(backupsourceconfig.FieldID, field.TypeInt)) - id, ok := _u.mutation.ID() - if !ok { - return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "BackupSourceConfig.id" for update`)} - } - _spec.Node.ID.Value = id - if fields := _u.fields; len(fields) > 0 { - _spec.Node.Columns = make([]string, 0, len(fields)) - _spec.Node.Columns = append(_spec.Node.Columns, backupsourceconfig.FieldID) - for _, f := range fields { - if !backupsourceconfig.ValidColumn(f) { - return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} - } - if f != backupsourceconfig.FieldID { - _spec.Node.Columns = append(_spec.Node.Columns, f) - } - } - } - if ps := _u.mutation.predicates; len(ps) > 0 { - _spec.Predicate = func(selector *sql.Selector) { - for i := range ps { - ps[i](selector) - } - } - } - if value, ok := _u.mutation.SourceType(); ok { - _spec.SetField(backupsourceconfig.FieldSourceType, field.TypeEnum, value) - } - if value, ok := _u.mutation.ProfileID(); ok { - _spec.SetField(backupsourceconfig.FieldProfileID, field.TypeString, value) - } - if value, ok := _u.mutation.Name(); 
ok { - _spec.SetField(backupsourceconfig.FieldName, field.TypeString, value) - } - if value, ok := _u.mutation.IsActive(); ok { - _spec.SetField(backupsourceconfig.FieldIsActive, field.TypeBool, value) - } - if value, ok := _u.mutation.Host(); ok { - _spec.SetField(backupsourceconfig.FieldHost, field.TypeString, value) - } - if _u.mutation.HostCleared() { - _spec.ClearField(backupsourceconfig.FieldHost, field.TypeString) - } - if value, ok := _u.mutation.Port(); ok { - _spec.SetField(backupsourceconfig.FieldPort, field.TypeInt, value) - } - if value, ok := _u.mutation.AddedPort(); ok { - _spec.AddField(backupsourceconfig.FieldPort, field.TypeInt, value) - } - if _u.mutation.PortCleared() { - _spec.ClearField(backupsourceconfig.FieldPort, field.TypeInt) - } - if value, ok := _u.mutation.Username(); ok { - _spec.SetField(backupsourceconfig.FieldUsername, field.TypeString, value) - } - if _u.mutation.UsernameCleared() { - _spec.ClearField(backupsourceconfig.FieldUsername, field.TypeString) - } - if value, ok := _u.mutation.PasswordEncrypted(); ok { - _spec.SetField(backupsourceconfig.FieldPasswordEncrypted, field.TypeString, value) - } - if _u.mutation.PasswordEncryptedCleared() { - _spec.ClearField(backupsourceconfig.FieldPasswordEncrypted, field.TypeString) - } - if value, ok := _u.mutation.Database(); ok { - _spec.SetField(backupsourceconfig.FieldDatabase, field.TypeString, value) - } - if _u.mutation.DatabaseCleared() { - _spec.ClearField(backupsourceconfig.FieldDatabase, field.TypeString) - } - if value, ok := _u.mutation.SslMode(); ok { - _spec.SetField(backupsourceconfig.FieldSslMode, field.TypeString, value) - } - if _u.mutation.SslModeCleared() { - _spec.ClearField(backupsourceconfig.FieldSslMode, field.TypeString) - } - if value, ok := _u.mutation.Addr(); ok { - _spec.SetField(backupsourceconfig.FieldAddr, field.TypeString, value) - } - if _u.mutation.AddrCleared() { - _spec.ClearField(backupsourceconfig.FieldAddr, field.TypeString) - } - if value, ok := 
_u.mutation.RedisDb(); ok { - _spec.SetField(backupsourceconfig.FieldRedisDb, field.TypeInt, value) - } - if value, ok := _u.mutation.AddedRedisDb(); ok { - _spec.AddField(backupsourceconfig.FieldRedisDb, field.TypeInt, value) - } - if _u.mutation.RedisDbCleared() { - _spec.ClearField(backupsourceconfig.FieldRedisDb, field.TypeInt) - } - if value, ok := _u.mutation.ContainerName(); ok { - _spec.SetField(backupsourceconfig.FieldContainerName, field.TypeString, value) - } - if value, ok := _u.mutation.UpdatedAt(); ok { - _spec.SetField(backupsourceconfig.FieldUpdatedAt, field.TypeTime, value) - } - _node = &BackupSourceConfig{config: _u.config} - _spec.Assign = _node.assignValues - _spec.ScanValues = _node.scanValues - if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { - if _, ok := err.(*sqlgraph.NotFoundError); ok { - err = &NotFoundError{backupsourceconfig.Label} - } else if sqlgraph.IsConstraintError(err) { - err = &ConstraintError{msg: err.Error(), wrap: err} - } - return nil, err - } - _u.mutation.done = true - return _node, nil -} diff --git a/backup/ent/client.go b/backup/ent/client.go deleted file mode 100644 index 3e6741ab4..000000000 --- a/backup/ent/client.go +++ /dev/null @@ -1,947 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "log" - "reflect" - - "github.com/Wei-Shaw/sub2api/backup/ent/migrate" - - "entgo.io/ent" - "entgo.io/ent/dialect" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" - "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" -) - -// Client is the client that holds all ent builders. -type Client struct { - config - // Schema is the client for creating, migrating and dropping schema. 
- Schema *migrate.Schema - // BackupJob is the client for interacting with the BackupJob builders. - BackupJob *BackupJobClient - // BackupJobEvent is the client for interacting with the BackupJobEvent builders. - BackupJobEvent *BackupJobEventClient - // BackupS3Config is the client for interacting with the BackupS3Config builders. - BackupS3Config *BackupS3ConfigClient - // BackupSetting is the client for interacting with the BackupSetting builders. - BackupSetting *BackupSettingClient - // BackupSourceConfig is the client for interacting with the BackupSourceConfig builders. - BackupSourceConfig *BackupSourceConfigClient -} - -// NewClient creates a new client configured with the given options. -func NewClient(opts ...Option) *Client { - client := &Client{config: newConfig(opts...)} - client.init() - return client -} - -func (c *Client) init() { - c.Schema = migrate.NewSchema(c.driver) - c.BackupJob = NewBackupJobClient(c.config) - c.BackupJobEvent = NewBackupJobEventClient(c.config) - c.BackupS3Config = NewBackupS3ConfigClient(c.config) - c.BackupSetting = NewBackupSettingClient(c.config) - c.BackupSourceConfig = NewBackupSourceConfigClient(c.config) -} - -type ( - // config is the configuration for the client and its builder. - config struct { - // driver used for executing database requests. - driver dialect.Driver - // debug enable a debug logging. - debug bool - // log used for logging on debug mode. - log func(...any) - // hooks to execute on mutations. - hooks *hooks - // interceptors to execute on queries. - inters *inters - } - // Option function to configure the client. - Option func(*config) -) - -// newConfig creates a new config for the client. -func newConfig(opts ...Option) config { - cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} - cfg.options(opts...) - return cfg -} - -// options applies the options on the config object. 
-func (c *config) options(opts ...Option) { - for _, opt := range opts { - opt(c) - } - if c.debug { - c.driver = dialect.Debug(c.driver, c.log) - } -} - -// Debug enables debug logging on the ent.Driver. -func Debug() Option { - return func(c *config) { - c.debug = true - } -} - -// Log sets the logging function for debug mode. -func Log(fn func(...any)) Option { - return func(c *config) { - c.log = fn - } -} - -// Driver configures the client driver. -func Driver(driver dialect.Driver) Option { - return func(c *config) { - c.driver = driver - } -} - -// Open opens a database/sql.DB specified by the driver name and -// the data source name, and returns a new client attached to it. -// Optional parameters can be added for configuring the client. -func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { - switch driverName { - case dialect.MySQL, dialect.Postgres, dialect.SQLite: - drv, err := sql.Open(driverName, dataSourceName) - if err != nil { - return nil, err - } - return NewClient(append(options, Driver(drv))...), nil - default: - return nil, fmt.Errorf("unsupported driver: %q", driverName) - } -} - -// ErrTxStarted is returned when trying to start a new transaction from a transactional client. -var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") - -// Tx returns a new transactional client. The provided context -// is used until the transaction is committed or rolled back. 
-func (c *Client) Tx(ctx context.Context) (*Tx, error) { - if _, ok := c.driver.(*txDriver); ok { - return nil, ErrTxStarted - } - tx, err := newTx(ctx, c.driver) - if err != nil { - return nil, fmt.Errorf("ent: starting a transaction: %w", err) - } - cfg := c.config - cfg.driver = tx - return &Tx{ - ctx: ctx, - config: cfg, - BackupJob: NewBackupJobClient(cfg), - BackupJobEvent: NewBackupJobEventClient(cfg), - BackupS3Config: NewBackupS3ConfigClient(cfg), - BackupSetting: NewBackupSettingClient(cfg), - BackupSourceConfig: NewBackupSourceConfigClient(cfg), - }, nil -} - -// BeginTx returns a transactional client with specified options. -func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { - if _, ok := c.driver.(*txDriver); ok { - return nil, errors.New("ent: cannot start a transaction within a transaction") - } - tx, err := c.driver.(interface { - BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error) - }).BeginTx(ctx, opts) - if err != nil { - return nil, fmt.Errorf("ent: starting a transaction: %w", err) - } - cfg := c.config - cfg.driver = &txDriver{tx: tx, drv: c.driver} - return &Tx{ - ctx: ctx, - config: cfg, - BackupJob: NewBackupJobClient(cfg), - BackupJobEvent: NewBackupJobEventClient(cfg), - BackupS3Config: NewBackupS3ConfigClient(cfg), - BackupSetting: NewBackupSettingClient(cfg), - BackupSourceConfig: NewBackupSourceConfigClient(cfg), - }, nil -} - -// Debug returns a new debug-client. It's used to get verbose logging on specific operations. -// -// client.Debug(). -// BackupJob. -// Query(). -// Count(ctx) -func (c *Client) Debug() *Client { - if c.debug { - return c - } - cfg := c.config - cfg.driver = dialect.Debug(c.driver, c.log) - client := &Client{config: cfg} - client.init() - return client -} - -// Close closes the database connection and prevents new queries from starting. -func (c *Client) Close() error { - return c.driver.Close() -} - -// Use adds the mutation hooks to all the entity clients. 
-// In order to add hooks to a specific client, call: `client.Node.Use(...)`. -func (c *Client) Use(hooks ...Hook) { - c.BackupJob.Use(hooks...) - c.BackupJobEvent.Use(hooks...) - c.BackupS3Config.Use(hooks...) - c.BackupSetting.Use(hooks...) - c.BackupSourceConfig.Use(hooks...) -} - -// Intercept adds the query interceptors to all the entity clients. -// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. -func (c *Client) Intercept(interceptors ...Interceptor) { - c.BackupJob.Intercept(interceptors...) - c.BackupJobEvent.Intercept(interceptors...) - c.BackupS3Config.Intercept(interceptors...) - c.BackupSetting.Intercept(interceptors...) - c.BackupSourceConfig.Intercept(interceptors...) -} - -// Mutate implements the ent.Mutator interface. -func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { - switch m := m.(type) { - case *BackupJobMutation: - return c.BackupJob.mutate(ctx, m) - case *BackupJobEventMutation: - return c.BackupJobEvent.mutate(ctx, m) - case *BackupS3ConfigMutation: - return c.BackupS3Config.mutate(ctx, m) - case *BackupSettingMutation: - return c.BackupSetting.mutate(ctx, m) - case *BackupSourceConfigMutation: - return c.BackupSourceConfig.mutate(ctx, m) - default: - return nil, fmt.Errorf("ent: unknown mutation type %T", m) - } -} - -// BackupJobClient is a client for the BackupJob schema. -type BackupJobClient struct { - config -} - -// NewBackupJobClient returns a client for the BackupJob from the given config. -func NewBackupJobClient(c config) *BackupJobClient { - return &BackupJobClient{config: c} -} - -// Use adds a list of mutation hooks to the hooks stack. -// A call to `Use(f, g, h)` equals to `backupjob.Hooks(f(g(h())))`. -func (c *BackupJobClient) Use(hooks ...Hook) { - c.hooks.BackupJob = append(c.hooks.BackupJob, hooks...) -} - -// Intercept adds a list of query interceptors to the interceptors stack. -// A call to `Intercept(f, g, h)` equals to `backupjob.Intercept(f(g(h())))`. 
-func (c *BackupJobClient) Intercept(interceptors ...Interceptor) { - c.inters.BackupJob = append(c.inters.BackupJob, interceptors...) -} - -// Create returns a builder for creating a BackupJob entity. -func (c *BackupJobClient) Create() *BackupJobCreate { - mutation := newBackupJobMutation(c.config, OpCreate) - return &BackupJobCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// CreateBulk returns a builder for creating a bulk of BackupJob entities. -func (c *BackupJobClient) CreateBulk(builders ...*BackupJobCreate) *BackupJobCreateBulk { - return &BackupJobCreateBulk{config: c.config, builders: builders} -} - -// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates -// a builder and applies setFunc on it. -func (c *BackupJobClient) MapCreateBulk(slice any, setFunc func(*BackupJobCreate, int)) *BackupJobCreateBulk { - rv := reflect.ValueOf(slice) - if rv.Kind() != reflect.Slice { - return &BackupJobCreateBulk{err: fmt.Errorf("calling to BackupJobClient.MapCreateBulk with wrong type %T, need slice", slice)} - } - builders := make([]*BackupJobCreate, rv.Len()) - for i := 0; i < rv.Len(); i++ { - builders[i] = c.Create() - setFunc(builders[i], i) - } - return &BackupJobCreateBulk{config: c.config, builders: builders} -} - -// Update returns an update builder for BackupJob. -func (c *BackupJobClient) Update() *BackupJobUpdate { - mutation := newBackupJobMutation(c.config, OpUpdate) - return &BackupJobUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOne returns an update builder for the given entity. -func (c *BackupJobClient) UpdateOne(_m *BackupJob) *BackupJobUpdateOne { - mutation := newBackupJobMutation(c.config, OpUpdateOne, withBackupJob(_m)) - return &BackupJobUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOneID returns an update builder for the given id. 
-func (c *BackupJobClient) UpdateOneID(id int) *BackupJobUpdateOne { - mutation := newBackupJobMutation(c.config, OpUpdateOne, withBackupJobID(id)) - return &BackupJobUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// Delete returns a delete builder for BackupJob. -func (c *BackupJobClient) Delete() *BackupJobDelete { - mutation := newBackupJobMutation(c.config, OpDelete) - return &BackupJobDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// DeleteOne returns a builder for deleting the given entity. -func (c *BackupJobClient) DeleteOne(_m *BackupJob) *BackupJobDeleteOne { - return c.DeleteOneID(_m.ID) -} - -// DeleteOneID returns a builder for deleting the given entity by its id. -func (c *BackupJobClient) DeleteOneID(id int) *BackupJobDeleteOne { - builder := c.Delete().Where(backupjob.ID(id)) - builder.mutation.id = &id - builder.mutation.op = OpDeleteOne - return &BackupJobDeleteOne{builder} -} - -// Query returns a query builder for BackupJob. -func (c *BackupJobClient) Query() *BackupJobQuery { - return &BackupJobQuery{ - config: c.config, - ctx: &QueryContext{Type: TypeBackupJob}, - inters: c.Interceptors(), - } -} - -// Get returns a BackupJob entity by its id. -func (c *BackupJobClient) Get(ctx context.Context, id int) (*BackupJob, error) { - return c.Query().Where(backupjob.ID(id)).Only(ctx) -} - -// GetX is like Get, but panics if an error occurs. -func (c *BackupJobClient) GetX(ctx context.Context, id int) *BackupJob { - obj, err := c.Get(ctx, id) - if err != nil { - panic(err) - } - return obj -} - -// QueryEvents queries the events edge of a BackupJob. 
-func (c *BackupJobClient) QueryEvents(_m *BackupJob) *BackupJobEventQuery { - query := (&BackupJobEventClient{config: c.config}).Query() - query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := _m.ID - step := sqlgraph.NewStep( - sqlgraph.From(backupjob.Table, backupjob.FieldID, id), - sqlgraph.To(backupjobevent.Table, backupjobevent.FieldID), - sqlgraph.Edge(sqlgraph.O2M, true, backupjob.EventsTable, backupjob.EventsColumn), - ) - fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) - return fromV, nil - } - return query -} - -// Hooks returns the client hooks. -func (c *BackupJobClient) Hooks() []Hook { - return c.hooks.BackupJob -} - -// Interceptors returns the client interceptors. -func (c *BackupJobClient) Interceptors() []Interceptor { - return c.inters.BackupJob -} - -func (c *BackupJobClient) mutate(ctx context.Context, m *BackupJobMutation) (Value, error) { - switch m.Op() { - case OpCreate: - return (&BackupJobCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdate: - return (&BackupJobUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdateOne: - return (&BackupJobUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpDelete, OpDeleteOne: - return (&BackupJobDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) - default: - return nil, fmt.Errorf("ent: unknown BackupJob mutation op: %q", m.Op()) - } -} - -// BackupJobEventClient is a client for the BackupJobEvent schema. -type BackupJobEventClient struct { - config -} - -// NewBackupJobEventClient returns a client for the BackupJobEvent from the given config. -func NewBackupJobEventClient(c config) *BackupJobEventClient { - return &BackupJobEventClient{config: c} -} - -// Use adds a list of mutation hooks to the hooks stack. -// A call to `Use(f, g, h)` equals to `backupjobevent.Hooks(f(g(h())))`. 
-func (c *BackupJobEventClient) Use(hooks ...Hook) { - c.hooks.BackupJobEvent = append(c.hooks.BackupJobEvent, hooks...) -} - -// Intercept adds a list of query interceptors to the interceptors stack. -// A call to `Intercept(f, g, h)` equals to `backupjobevent.Intercept(f(g(h())))`. -func (c *BackupJobEventClient) Intercept(interceptors ...Interceptor) { - c.inters.BackupJobEvent = append(c.inters.BackupJobEvent, interceptors...) -} - -// Create returns a builder for creating a BackupJobEvent entity. -func (c *BackupJobEventClient) Create() *BackupJobEventCreate { - mutation := newBackupJobEventMutation(c.config, OpCreate) - return &BackupJobEventCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// CreateBulk returns a builder for creating a bulk of BackupJobEvent entities. -func (c *BackupJobEventClient) CreateBulk(builders ...*BackupJobEventCreate) *BackupJobEventCreateBulk { - return &BackupJobEventCreateBulk{config: c.config, builders: builders} -} - -// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates -// a builder and applies setFunc on it. -func (c *BackupJobEventClient) MapCreateBulk(slice any, setFunc func(*BackupJobEventCreate, int)) *BackupJobEventCreateBulk { - rv := reflect.ValueOf(slice) - if rv.Kind() != reflect.Slice { - return &BackupJobEventCreateBulk{err: fmt.Errorf("calling to BackupJobEventClient.MapCreateBulk with wrong type %T, need slice", slice)} - } - builders := make([]*BackupJobEventCreate, rv.Len()) - for i := 0; i < rv.Len(); i++ { - builders[i] = c.Create() - setFunc(builders[i], i) - } - return &BackupJobEventCreateBulk{config: c.config, builders: builders} -} - -// Update returns an update builder for BackupJobEvent. 
-func (c *BackupJobEventClient) Update() *BackupJobEventUpdate { - mutation := newBackupJobEventMutation(c.config, OpUpdate) - return &BackupJobEventUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOne returns an update builder for the given entity. -func (c *BackupJobEventClient) UpdateOne(_m *BackupJobEvent) *BackupJobEventUpdateOne { - mutation := newBackupJobEventMutation(c.config, OpUpdateOne, withBackupJobEvent(_m)) - return &BackupJobEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOneID returns an update builder for the given id. -func (c *BackupJobEventClient) UpdateOneID(id int) *BackupJobEventUpdateOne { - mutation := newBackupJobEventMutation(c.config, OpUpdateOne, withBackupJobEventID(id)) - return &BackupJobEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// Delete returns a delete builder for BackupJobEvent. -func (c *BackupJobEventClient) Delete() *BackupJobEventDelete { - mutation := newBackupJobEventMutation(c.config, OpDelete) - return &BackupJobEventDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// DeleteOne returns a builder for deleting the given entity. -func (c *BackupJobEventClient) DeleteOne(_m *BackupJobEvent) *BackupJobEventDeleteOne { - return c.DeleteOneID(_m.ID) -} - -// DeleteOneID returns a builder for deleting the given entity by its id. -func (c *BackupJobEventClient) DeleteOneID(id int) *BackupJobEventDeleteOne { - builder := c.Delete().Where(backupjobevent.ID(id)) - builder.mutation.id = &id - builder.mutation.op = OpDeleteOne - return &BackupJobEventDeleteOne{builder} -} - -// Query returns a query builder for BackupJobEvent. -func (c *BackupJobEventClient) Query() *BackupJobEventQuery { - return &BackupJobEventQuery{ - config: c.config, - ctx: &QueryContext{Type: TypeBackupJobEvent}, - inters: c.Interceptors(), - } -} - -// Get returns a BackupJobEvent entity by its id. 
-func (c *BackupJobEventClient) Get(ctx context.Context, id int) (*BackupJobEvent, error) { - return c.Query().Where(backupjobevent.ID(id)).Only(ctx) -} - -// GetX is like Get, but panics if an error occurs. -func (c *BackupJobEventClient) GetX(ctx context.Context, id int) *BackupJobEvent { - obj, err := c.Get(ctx, id) - if err != nil { - panic(err) - } - return obj -} - -// QueryJob queries the job edge of a BackupJobEvent. -func (c *BackupJobEventClient) QueryJob(_m *BackupJobEvent) *BackupJobQuery { - query := (&BackupJobClient{config: c.config}).Query() - query.path = func(context.Context) (fromV *sql.Selector, _ error) { - id := _m.ID - step := sqlgraph.NewStep( - sqlgraph.From(backupjobevent.Table, backupjobevent.FieldID, id), - sqlgraph.To(backupjob.Table, backupjob.FieldID), - sqlgraph.Edge(sqlgraph.M2O, false, backupjobevent.JobTable, backupjobevent.JobColumn), - ) - fromV = sqlgraph.Neighbors(_m.driver.Dialect(), step) - return fromV, nil - } - return query -} - -// Hooks returns the client hooks. -func (c *BackupJobEventClient) Hooks() []Hook { - return c.hooks.BackupJobEvent -} - -// Interceptors returns the client interceptors. 
-func (c *BackupJobEventClient) Interceptors() []Interceptor { - return c.inters.BackupJobEvent -} - -func (c *BackupJobEventClient) mutate(ctx context.Context, m *BackupJobEventMutation) (Value, error) { - switch m.Op() { - case OpCreate: - return (&BackupJobEventCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdate: - return (&BackupJobEventUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdateOne: - return (&BackupJobEventUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpDelete, OpDeleteOne: - return (&BackupJobEventDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) - default: - return nil, fmt.Errorf("ent: unknown BackupJobEvent mutation op: %q", m.Op()) - } -} - -// BackupS3ConfigClient is a client for the BackupS3Config schema. -type BackupS3ConfigClient struct { - config -} - -// NewBackupS3ConfigClient returns a client for the BackupS3Config from the given config. -func NewBackupS3ConfigClient(c config) *BackupS3ConfigClient { - return &BackupS3ConfigClient{config: c} -} - -// Use adds a list of mutation hooks to the hooks stack. -// A call to `Use(f, g, h)` equals to `backups3config.Hooks(f(g(h())))`. -func (c *BackupS3ConfigClient) Use(hooks ...Hook) { - c.hooks.BackupS3Config = append(c.hooks.BackupS3Config, hooks...) -} - -// Intercept adds a list of query interceptors to the interceptors stack. -// A call to `Intercept(f, g, h)` equals to `backups3config.Intercept(f(g(h())))`. -func (c *BackupS3ConfigClient) Intercept(interceptors ...Interceptor) { - c.inters.BackupS3Config = append(c.inters.BackupS3Config, interceptors...) -} - -// Create returns a builder for creating a BackupS3Config entity. 
-func (c *BackupS3ConfigClient) Create() *BackupS3ConfigCreate { - mutation := newBackupS3ConfigMutation(c.config, OpCreate) - return &BackupS3ConfigCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// CreateBulk returns a builder for creating a bulk of BackupS3Config entities. -func (c *BackupS3ConfigClient) CreateBulk(builders ...*BackupS3ConfigCreate) *BackupS3ConfigCreateBulk { - return &BackupS3ConfigCreateBulk{config: c.config, builders: builders} -} - -// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates -// a builder and applies setFunc on it. -func (c *BackupS3ConfigClient) MapCreateBulk(slice any, setFunc func(*BackupS3ConfigCreate, int)) *BackupS3ConfigCreateBulk { - rv := reflect.ValueOf(slice) - if rv.Kind() != reflect.Slice { - return &BackupS3ConfigCreateBulk{err: fmt.Errorf("calling to BackupS3ConfigClient.MapCreateBulk with wrong type %T, need slice", slice)} - } - builders := make([]*BackupS3ConfigCreate, rv.Len()) - for i := 0; i < rv.Len(); i++ { - builders[i] = c.Create() - setFunc(builders[i], i) - } - return &BackupS3ConfigCreateBulk{config: c.config, builders: builders} -} - -// Update returns an update builder for BackupS3Config. -func (c *BackupS3ConfigClient) Update() *BackupS3ConfigUpdate { - mutation := newBackupS3ConfigMutation(c.config, OpUpdate) - return &BackupS3ConfigUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOne returns an update builder for the given entity. -func (c *BackupS3ConfigClient) UpdateOne(_m *BackupS3Config) *BackupS3ConfigUpdateOne { - mutation := newBackupS3ConfigMutation(c.config, OpUpdateOne, withBackupS3Config(_m)) - return &BackupS3ConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOneID returns an update builder for the given id. 
-func (c *BackupS3ConfigClient) UpdateOneID(id int) *BackupS3ConfigUpdateOne { - mutation := newBackupS3ConfigMutation(c.config, OpUpdateOne, withBackupS3ConfigID(id)) - return &BackupS3ConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// Delete returns a delete builder for BackupS3Config. -func (c *BackupS3ConfigClient) Delete() *BackupS3ConfigDelete { - mutation := newBackupS3ConfigMutation(c.config, OpDelete) - return &BackupS3ConfigDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// DeleteOne returns a builder for deleting the given entity. -func (c *BackupS3ConfigClient) DeleteOne(_m *BackupS3Config) *BackupS3ConfigDeleteOne { - return c.DeleteOneID(_m.ID) -} - -// DeleteOneID returns a builder for deleting the given entity by its id. -func (c *BackupS3ConfigClient) DeleteOneID(id int) *BackupS3ConfigDeleteOne { - builder := c.Delete().Where(backups3config.ID(id)) - builder.mutation.id = &id - builder.mutation.op = OpDeleteOne - return &BackupS3ConfigDeleteOne{builder} -} - -// Query returns a query builder for BackupS3Config. -func (c *BackupS3ConfigClient) Query() *BackupS3ConfigQuery { - return &BackupS3ConfigQuery{ - config: c.config, - ctx: &QueryContext{Type: TypeBackupS3Config}, - inters: c.Interceptors(), - } -} - -// Get returns a BackupS3Config entity by its id. -func (c *BackupS3ConfigClient) Get(ctx context.Context, id int) (*BackupS3Config, error) { - return c.Query().Where(backups3config.ID(id)).Only(ctx) -} - -// GetX is like Get, but panics if an error occurs. -func (c *BackupS3ConfigClient) GetX(ctx context.Context, id int) *BackupS3Config { - obj, err := c.Get(ctx, id) - if err != nil { - panic(err) - } - return obj -} - -// Hooks returns the client hooks. -func (c *BackupS3ConfigClient) Hooks() []Hook { - return c.hooks.BackupS3Config -} - -// Interceptors returns the client interceptors. 
-func (c *BackupS3ConfigClient) Interceptors() []Interceptor { - return c.inters.BackupS3Config -} - -func (c *BackupS3ConfigClient) mutate(ctx context.Context, m *BackupS3ConfigMutation) (Value, error) { - switch m.Op() { - case OpCreate: - return (&BackupS3ConfigCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdate: - return (&BackupS3ConfigUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdateOne: - return (&BackupS3ConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpDelete, OpDeleteOne: - return (&BackupS3ConfigDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) - default: - return nil, fmt.Errorf("ent: unknown BackupS3Config mutation op: %q", m.Op()) - } -} - -// BackupSettingClient is a client for the BackupSetting schema. -type BackupSettingClient struct { - config -} - -// NewBackupSettingClient returns a client for the BackupSetting from the given config. -func NewBackupSettingClient(c config) *BackupSettingClient { - return &BackupSettingClient{config: c} -} - -// Use adds a list of mutation hooks to the hooks stack. -// A call to `Use(f, g, h)` equals to `backupsetting.Hooks(f(g(h())))`. -func (c *BackupSettingClient) Use(hooks ...Hook) { - c.hooks.BackupSetting = append(c.hooks.BackupSetting, hooks...) -} - -// Intercept adds a list of query interceptors to the interceptors stack. -// A call to `Intercept(f, g, h)` equals to `backupsetting.Intercept(f(g(h())))`. -func (c *BackupSettingClient) Intercept(interceptors ...Interceptor) { - c.inters.BackupSetting = append(c.inters.BackupSetting, interceptors...) -} - -// Create returns a builder for creating a BackupSetting entity. 
-func (c *BackupSettingClient) Create() *BackupSettingCreate { - mutation := newBackupSettingMutation(c.config, OpCreate) - return &BackupSettingCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// CreateBulk returns a builder for creating a bulk of BackupSetting entities. -func (c *BackupSettingClient) CreateBulk(builders ...*BackupSettingCreate) *BackupSettingCreateBulk { - return &BackupSettingCreateBulk{config: c.config, builders: builders} -} - -// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates -// a builder and applies setFunc on it. -func (c *BackupSettingClient) MapCreateBulk(slice any, setFunc func(*BackupSettingCreate, int)) *BackupSettingCreateBulk { - rv := reflect.ValueOf(slice) - if rv.Kind() != reflect.Slice { - return &BackupSettingCreateBulk{err: fmt.Errorf("calling to BackupSettingClient.MapCreateBulk with wrong type %T, need slice", slice)} - } - builders := make([]*BackupSettingCreate, rv.Len()) - for i := 0; i < rv.Len(); i++ { - builders[i] = c.Create() - setFunc(builders[i], i) - } - return &BackupSettingCreateBulk{config: c.config, builders: builders} -} - -// Update returns an update builder for BackupSetting. -func (c *BackupSettingClient) Update() *BackupSettingUpdate { - mutation := newBackupSettingMutation(c.config, OpUpdate) - return &BackupSettingUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOne returns an update builder for the given entity. -func (c *BackupSettingClient) UpdateOne(_m *BackupSetting) *BackupSettingUpdateOne { - mutation := newBackupSettingMutation(c.config, OpUpdateOne, withBackupSetting(_m)) - return &BackupSettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOneID returns an update builder for the given id. 
-func (c *BackupSettingClient) UpdateOneID(id int) *BackupSettingUpdateOne { - mutation := newBackupSettingMutation(c.config, OpUpdateOne, withBackupSettingID(id)) - return &BackupSettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// Delete returns a delete builder for BackupSetting. -func (c *BackupSettingClient) Delete() *BackupSettingDelete { - mutation := newBackupSettingMutation(c.config, OpDelete) - return &BackupSettingDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// DeleteOne returns a builder for deleting the given entity. -func (c *BackupSettingClient) DeleteOne(_m *BackupSetting) *BackupSettingDeleteOne { - return c.DeleteOneID(_m.ID) -} - -// DeleteOneID returns a builder for deleting the given entity by its id. -func (c *BackupSettingClient) DeleteOneID(id int) *BackupSettingDeleteOne { - builder := c.Delete().Where(backupsetting.ID(id)) - builder.mutation.id = &id - builder.mutation.op = OpDeleteOne - return &BackupSettingDeleteOne{builder} -} - -// Query returns a query builder for BackupSetting. -func (c *BackupSettingClient) Query() *BackupSettingQuery { - return &BackupSettingQuery{ - config: c.config, - ctx: &QueryContext{Type: TypeBackupSetting}, - inters: c.Interceptors(), - } -} - -// Get returns a BackupSetting entity by its id. -func (c *BackupSettingClient) Get(ctx context.Context, id int) (*BackupSetting, error) { - return c.Query().Where(backupsetting.ID(id)).Only(ctx) -} - -// GetX is like Get, but panics if an error occurs. -func (c *BackupSettingClient) GetX(ctx context.Context, id int) *BackupSetting { - obj, err := c.Get(ctx, id) - if err != nil { - panic(err) - } - return obj -} - -// Hooks returns the client hooks. -func (c *BackupSettingClient) Hooks() []Hook { - return c.hooks.BackupSetting -} - -// Interceptors returns the client interceptors. 
-func (c *BackupSettingClient) Interceptors() []Interceptor { - return c.inters.BackupSetting -} - -func (c *BackupSettingClient) mutate(ctx context.Context, m *BackupSettingMutation) (Value, error) { - switch m.Op() { - case OpCreate: - return (&BackupSettingCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdate: - return (&BackupSettingUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdateOne: - return (&BackupSettingUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpDelete, OpDeleteOne: - return (&BackupSettingDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) - default: - return nil, fmt.Errorf("ent: unknown BackupSetting mutation op: %q", m.Op()) - } -} - -// BackupSourceConfigClient is a client for the BackupSourceConfig schema. -type BackupSourceConfigClient struct { - config -} - -// NewBackupSourceConfigClient returns a client for the BackupSourceConfig from the given config. -func NewBackupSourceConfigClient(c config) *BackupSourceConfigClient { - return &BackupSourceConfigClient{config: c} -} - -// Use adds a list of mutation hooks to the hooks stack. -// A call to `Use(f, g, h)` equals to `backupsourceconfig.Hooks(f(g(h())))`. -func (c *BackupSourceConfigClient) Use(hooks ...Hook) { - c.hooks.BackupSourceConfig = append(c.hooks.BackupSourceConfig, hooks...) -} - -// Intercept adds a list of query interceptors to the interceptors stack. -// A call to `Intercept(f, g, h)` equals to `backupsourceconfig.Intercept(f(g(h())))`. -func (c *BackupSourceConfigClient) Intercept(interceptors ...Interceptor) { - c.inters.BackupSourceConfig = append(c.inters.BackupSourceConfig, interceptors...) -} - -// Create returns a builder for creating a BackupSourceConfig entity. 
-func (c *BackupSourceConfigClient) Create() *BackupSourceConfigCreate { - mutation := newBackupSourceConfigMutation(c.config, OpCreate) - return &BackupSourceConfigCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// CreateBulk returns a builder for creating a bulk of BackupSourceConfig entities. -func (c *BackupSourceConfigClient) CreateBulk(builders ...*BackupSourceConfigCreate) *BackupSourceConfigCreateBulk { - return &BackupSourceConfigCreateBulk{config: c.config, builders: builders} -} - -// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates -// a builder and applies setFunc on it. -func (c *BackupSourceConfigClient) MapCreateBulk(slice any, setFunc func(*BackupSourceConfigCreate, int)) *BackupSourceConfigCreateBulk { - rv := reflect.ValueOf(slice) - if rv.Kind() != reflect.Slice { - return &BackupSourceConfigCreateBulk{err: fmt.Errorf("calling to BackupSourceConfigClient.MapCreateBulk with wrong type %T, need slice", slice)} - } - builders := make([]*BackupSourceConfigCreate, rv.Len()) - for i := 0; i < rv.Len(); i++ { - builders[i] = c.Create() - setFunc(builders[i], i) - } - return &BackupSourceConfigCreateBulk{config: c.config, builders: builders} -} - -// Update returns an update builder for BackupSourceConfig. -func (c *BackupSourceConfigClient) Update() *BackupSourceConfigUpdate { - mutation := newBackupSourceConfigMutation(c.config, OpUpdate) - return &BackupSourceConfigUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOne returns an update builder for the given entity. -func (c *BackupSourceConfigClient) UpdateOne(_m *BackupSourceConfig) *BackupSourceConfigUpdateOne { - mutation := newBackupSourceConfigMutation(c.config, OpUpdateOne, withBackupSourceConfig(_m)) - return &BackupSourceConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// UpdateOneID returns an update builder for the given id. 
-func (c *BackupSourceConfigClient) UpdateOneID(id int) *BackupSourceConfigUpdateOne { - mutation := newBackupSourceConfigMutation(c.config, OpUpdateOne, withBackupSourceConfigID(id)) - return &BackupSourceConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// Delete returns a delete builder for BackupSourceConfig. -func (c *BackupSourceConfigClient) Delete() *BackupSourceConfigDelete { - mutation := newBackupSourceConfigMutation(c.config, OpDelete) - return &BackupSourceConfigDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} -} - -// DeleteOne returns a builder for deleting the given entity. -func (c *BackupSourceConfigClient) DeleteOne(_m *BackupSourceConfig) *BackupSourceConfigDeleteOne { - return c.DeleteOneID(_m.ID) -} - -// DeleteOneID returns a builder for deleting the given entity by its id. -func (c *BackupSourceConfigClient) DeleteOneID(id int) *BackupSourceConfigDeleteOne { - builder := c.Delete().Where(backupsourceconfig.ID(id)) - builder.mutation.id = &id - builder.mutation.op = OpDeleteOne - return &BackupSourceConfigDeleteOne{builder} -} - -// Query returns a query builder for BackupSourceConfig. -func (c *BackupSourceConfigClient) Query() *BackupSourceConfigQuery { - return &BackupSourceConfigQuery{ - config: c.config, - ctx: &QueryContext{Type: TypeBackupSourceConfig}, - inters: c.Interceptors(), - } -} - -// Get returns a BackupSourceConfig entity by its id. -func (c *BackupSourceConfigClient) Get(ctx context.Context, id int) (*BackupSourceConfig, error) { - return c.Query().Where(backupsourceconfig.ID(id)).Only(ctx) -} - -// GetX is like Get, but panics if an error occurs. -func (c *BackupSourceConfigClient) GetX(ctx context.Context, id int) *BackupSourceConfig { - obj, err := c.Get(ctx, id) - if err != nil { - panic(err) - } - return obj -} - -// Hooks returns the client hooks. 
-func (c *BackupSourceConfigClient) Hooks() []Hook { - return c.hooks.BackupSourceConfig -} - -// Interceptors returns the client interceptors. -func (c *BackupSourceConfigClient) Interceptors() []Interceptor { - return c.inters.BackupSourceConfig -} - -func (c *BackupSourceConfigClient) mutate(ctx context.Context, m *BackupSourceConfigMutation) (Value, error) { - switch m.Op() { - case OpCreate: - return (&BackupSourceConfigCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdate: - return (&BackupSourceConfigUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpUpdateOne: - return (&BackupSourceConfigUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) - case OpDelete, OpDeleteOne: - return (&BackupSourceConfigDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) - default: - return nil, fmt.Errorf("ent: unknown BackupSourceConfig mutation op: %q", m.Op()) - } -} - -// hooks and interceptors per client, for fast access. -type ( - hooks struct { - BackupJob, BackupJobEvent, BackupS3Config, BackupSetting, - BackupSourceConfig []ent.Hook - } - inters struct { - BackupJob, BackupJobEvent, BackupS3Config, BackupSetting, - BackupSourceConfig []ent.Interceptor - } -) diff --git a/backup/ent/ent.go b/backup/ent/ent.go deleted file mode 100644 index b6d986d43..000000000 --- a/backup/ent/ent.go +++ /dev/null @@ -1,616 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "reflect" - "sync" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "entgo.io/ent/dialect/sql/sqlgraph" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" - "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" -) - -// ent aliases to avoid import conflicts in user's code. 
-type ( - Op = ent.Op - Hook = ent.Hook - Value = ent.Value - Query = ent.Query - QueryContext = ent.QueryContext - Querier = ent.Querier - QuerierFunc = ent.QuerierFunc - Interceptor = ent.Interceptor - InterceptFunc = ent.InterceptFunc - Traverser = ent.Traverser - TraverseFunc = ent.TraverseFunc - Policy = ent.Policy - Mutator = ent.Mutator - Mutation = ent.Mutation - MutateFunc = ent.MutateFunc -) - -type clientCtxKey struct{} - -// FromContext returns a Client stored inside a context, or nil if there isn't one. -func FromContext(ctx context.Context) *Client { - c, _ := ctx.Value(clientCtxKey{}).(*Client) - return c -} - -// NewContext returns a new context with the given Client attached. -func NewContext(parent context.Context, c *Client) context.Context { - return context.WithValue(parent, clientCtxKey{}, c) -} - -type txCtxKey struct{} - -// TxFromContext returns a Tx stored inside a context, or nil if there isn't one. -func TxFromContext(ctx context.Context) *Tx { - tx, _ := ctx.Value(txCtxKey{}).(*Tx) - return tx -} - -// NewTxContext returns a new context with the given Tx attached. -func NewTxContext(parent context.Context, tx *Tx) context.Context { - return context.WithValue(parent, txCtxKey{}, tx) -} - -// OrderFunc applies an ordering on the sql selector. -// Deprecated: Use Asc/Desc functions or the package builders instead. -type OrderFunc func(*sql.Selector) - -var ( - initCheck sync.Once - columnCheck sql.ColumnCheck -) - -// checkColumn checks if the column exists in the given table. 
-func checkColumn(t, c string) error { - initCheck.Do(func() { - columnCheck = sql.NewColumnCheck(map[string]func(string) bool{ - backupjob.Table: backupjob.ValidColumn, - backupjobevent.Table: backupjobevent.ValidColumn, - backups3config.Table: backups3config.ValidColumn, - backupsetting.Table: backupsetting.ValidColumn, - backupsourceconfig.Table: backupsourceconfig.ValidColumn, - }) - }) - return columnCheck(t, c) -} - -// Asc applies the given fields in ASC order. -func Asc(fields ...string) func(*sql.Selector) { - return func(s *sql.Selector) { - for _, f := range fields { - if err := checkColumn(s.TableName(), f); err != nil { - s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) - } - s.OrderBy(sql.Asc(s.C(f))) - } - } -} - -// Desc applies the given fields in DESC order. -func Desc(fields ...string) func(*sql.Selector) { - return func(s *sql.Selector) { - for _, f := range fields { - if err := checkColumn(s.TableName(), f); err != nil { - s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)}) - } - s.OrderBy(sql.Desc(s.C(f))) - } - } -} - -// AggregateFunc applies an aggregation step on the group-by traversal/selector. -type AggregateFunc func(*sql.Selector) string - -// As is a pseudo aggregation function for renaming another other functions with custom names. For example: -// -// GroupBy(field1, field2). -// Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")). -// Scan(ctx, &v) -func As(fn AggregateFunc, end string) AggregateFunc { - return func(s *sql.Selector) string { - return sql.As(fn(s), end) - } -} - -// Count applies the "count" aggregation function on each group. -func Count() AggregateFunc { - return func(s *sql.Selector) string { - return sql.Count("*") - } -} - -// Max applies the "max" aggregation function on the given field of each group. 
-func Max(field string) AggregateFunc { - return func(s *sql.Selector) string { - if err := checkColumn(s.TableName(), field); err != nil { - s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) - return "" - } - return sql.Max(s.C(field)) - } -} - -// Mean applies the "mean" aggregation function on the given field of each group. -func Mean(field string) AggregateFunc { - return func(s *sql.Selector) string { - if err := checkColumn(s.TableName(), field); err != nil { - s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) - return "" - } - return sql.Avg(s.C(field)) - } -} - -// Min applies the "min" aggregation function on the given field of each group. -func Min(field string) AggregateFunc { - return func(s *sql.Selector) string { - if err := checkColumn(s.TableName(), field); err != nil { - s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) - return "" - } - return sql.Min(s.C(field)) - } -} - -// Sum applies the "sum" aggregation function on the given field of each group. -func Sum(field string) AggregateFunc { - return func(s *sql.Selector) string { - if err := checkColumn(s.TableName(), field); err != nil { - s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)}) - return "" - } - return sql.Sum(s.C(field)) - } -} - -// ValidationError returns when validating a field or edge fails. -type ValidationError struct { - Name string // Field or edge name. - err error -} - -// Error implements the error interface. -func (e *ValidationError) Error() string { - return e.err.Error() -} - -// Unwrap implements the errors.Wrapper interface. -func (e *ValidationError) Unwrap() error { - return e.err -} - -// IsValidationError returns a boolean indicating whether the error is a validation error. 
-func IsValidationError(err error) bool { - if err == nil { - return false - } - var e *ValidationError - return errors.As(err, &e) -} - -// NotFoundError returns when trying to fetch a specific entity and it was not found in the database. -type NotFoundError struct { - label string -} - -// Error implements the error interface. -func (e *NotFoundError) Error() string { - return "ent: " + e.label + " not found" -} - -// IsNotFound returns a boolean indicating whether the error is a not found error. -func IsNotFound(err error) bool { - if err == nil { - return false - } - var e *NotFoundError - return errors.As(err, &e) -} - -// MaskNotFound masks not found error. -func MaskNotFound(err error) error { - if IsNotFound(err) { - return nil - } - return err -} - -// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database. -type NotSingularError struct { - label string -} - -// Error implements the error interface. -func (e *NotSingularError) Error() string { - return "ent: " + e.label + " not singular" -} - -// IsNotSingular returns a boolean indicating whether the error is a not singular error. -func IsNotSingular(err error) bool { - if err == nil { - return false - } - var e *NotSingularError - return errors.As(err, &e) -} - -// NotLoadedError returns when trying to get a node that was not loaded by the query. -type NotLoadedError struct { - edge string -} - -// Error implements the error interface. -func (e *NotLoadedError) Error() string { - return "ent: " + e.edge + " edge was not loaded" -} - -// IsNotLoaded returns a boolean indicating whether the error is a not loaded error. -func IsNotLoaded(err error) bool { - if err == nil { - return false - } - var e *NotLoadedError - return errors.As(err, &e) -} - -// ConstraintError returns when trying to create/update one or more entities and -// one or more of their constraints failed. For example, violation of edge or -// field uniqueness. 
-type ConstraintError struct { - msg string - wrap error -} - -// Error implements the error interface. -func (e ConstraintError) Error() string { - return "ent: constraint failed: " + e.msg -} - -// Unwrap implements the errors.Wrapper interface. -func (e *ConstraintError) Unwrap() error { - return e.wrap -} - -// IsConstraintError returns a boolean indicating whether the error is a constraint failure. -func IsConstraintError(err error) bool { - if err == nil { - return false - } - var e *ConstraintError - return errors.As(err, &e) -} - -// selector embedded by the different Select/GroupBy builders. -type selector struct { - label string - flds *[]string - fns []AggregateFunc - scan func(context.Context, any) error -} - -// ScanX is like Scan, but panics if an error occurs. -func (s *selector) ScanX(ctx context.Context, v any) { - if err := s.scan(ctx, v); err != nil { - panic(err) - } -} - -// Strings returns list of strings from a selector. It is only allowed when selecting one field. -func (s *selector) Strings(ctx context.Context) ([]string, error) { - if len(*s.flds) > 1 { - return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") - } - var v []string - if err := s.scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// StringsX is like Strings, but panics if an error occurs. -func (s *selector) StringsX(ctx context.Context) []string { - v, err := s.Strings(ctx) - if err != nil { - panic(err) - } - return v -} - -// String returns a single string from a selector. It is only allowed when selecting one field. -func (s *selector) String(ctx context.Context) (_ string, err error) { - var v []string - if v, err = s.Strings(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{s.label} - default: - err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) - } - return -} - -// StringX is like String, but panics if an error occurs. 
-func (s *selector) StringX(ctx context.Context) string { - v, err := s.String(ctx) - if err != nil { - panic(err) - } - return v -} - -// Ints returns list of ints from a selector. It is only allowed when selecting one field. -func (s *selector) Ints(ctx context.Context) ([]int, error) { - if len(*s.flds) > 1 { - return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") - } - var v []int - if err := s.scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// IntsX is like Ints, but panics if an error occurs. -func (s *selector) IntsX(ctx context.Context) []int { - v, err := s.Ints(ctx) - if err != nil { - panic(err) - } - return v -} - -// Int returns a single int from a selector. It is only allowed when selecting one field. -func (s *selector) Int(ctx context.Context) (_ int, err error) { - var v []int - if v, err = s.Ints(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{s.label} - default: - err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) - } - return -} - -// IntX is like Int, but panics if an error occurs. -func (s *selector) IntX(ctx context.Context) int { - v, err := s.Int(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. -func (s *selector) Float64s(ctx context.Context) ([]float64, error) { - if len(*s.flds) > 1 { - return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") - } - var v []float64 - if err := s.scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// Float64sX is like Float64s, but panics if an error occurs. -func (s *selector) Float64sX(ctx context.Context) []float64 { - v, err := s.Float64s(ctx) - if err != nil { - panic(err) - } - return v -} - -// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
-func (s *selector) Float64(ctx context.Context) (_ float64, err error) { - var v []float64 - if v, err = s.Float64s(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{s.label} - default: - err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) - } - return -} - -// Float64X is like Float64, but panics if an error occurs. -func (s *selector) Float64X(ctx context.Context) float64 { - v, err := s.Float64(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bools returns list of bools from a selector. It is only allowed when selecting one field. -func (s *selector) Bools(ctx context.Context) ([]bool, error) { - if len(*s.flds) > 1 { - return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") - } - var v []bool - if err := s.scan(ctx, &v); err != nil { - return nil, err - } - return v, nil -} - -// BoolsX is like Bools, but panics if an error occurs. -func (s *selector) BoolsX(ctx context.Context) []bool { - v, err := s.Bools(ctx) - if err != nil { - panic(err) - } - return v -} - -// Bool returns a single bool from a selector. It is only allowed when selecting one field. -func (s *selector) Bool(ctx context.Context) (_ bool, err error) { - var v []bool - if v, err = s.Bools(ctx); err != nil { - return - } - switch len(v) { - case 1: - return v[0], nil - case 0: - err = &NotFoundError{s.label} - default: - err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v)) - } - return -} - -// BoolX is like Bool, but panics if an error occurs. -func (s *selector) BoolX(ctx context.Context) bool { - v, err := s.Bool(ctx) - if err != nil { - panic(err) - } - return v -} - -// withHooks invokes the builder operation with the given hooks, if any. 
-func withHooks[V Value, M any, PM interface { - *M - Mutation -}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) { - if len(hooks) == 0 { - return exec(ctx) - } - var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { - mutationT, ok := any(m).(PM) - if !ok { - return nil, fmt.Errorf("unexpected mutation type %T", m) - } - // Set the mutation to the builder. - *mutation = *mutationT - return exec(ctx) - }) - for i := len(hooks) - 1; i >= 0; i-- { - if hooks[i] == nil { - return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)") - } - mut = hooks[i](mut) - } - v, err := mut.Mutate(ctx, mutation) - if err != nil { - return value, err - } - nv, ok := v.(V) - if !ok { - return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation) - } - return nv, nil -} - -// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist. 
-func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { - if ent.QueryFromContext(ctx) == nil { - qc.Op = op - ctx = ent.NewQueryContext(ctx, qc) - } - return ctx -} - -func querierAll[V Value, Q interface { - sqlAll(context.Context, ...queryHook) (V, error) -}]() Querier { - return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { - query, ok := q.(Q) - if !ok { - return nil, fmt.Errorf("unexpected query type %T", q) - } - return query.sqlAll(ctx) - }) -} - -func querierCount[Q interface { - sqlCount(context.Context) (int, error) -}]() Querier { - return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { - query, ok := q.(Q) - if !ok { - return nil, fmt.Errorf("unexpected query type %T", q) - } - return query.sqlCount(ctx) - }) -} - -func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { - for i := len(inters) - 1; i >= 0; i-- { - qr = inters[i].Intercept(qr) - } - rv, err := qr.Query(ctx, q) - if err != nil { - return v, err - } - vt, ok := rv.(V) - if !ok { - return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) - } - return vt, nil -} - -func scanWithInterceptors[Q1 ent.Query, Q2 interface { - sqlScan(context.Context, Q1, any) error -}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { - rv := reflect.ValueOf(v) - var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { - query, ok := q.(Q1) - if !ok { - return nil, fmt.Errorf("unexpected query type %T", q) - } - if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { - return nil, err - } - if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { - return rv.Elem().Interface(), nil - } - return v, nil - }) - for i := len(inters) - 1; i >= 0; i-- { - qr = inters[i].Intercept(qr) - } - vv, err := qr.Query(ctx, rootQuery) - if err != nil { - return err - } - switch rv2 := reflect.ValueOf(vv); { - case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: - case rv.Type() == rv2.Type(): - rv.Elem().Set(rv2.Elem()) - case rv.Elem().Type() == rv2.Type(): - rv.Elem().Set(rv2) - } - return nil -} - -// queryHook describes an internal hook for the different sqlAll methods. -type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/backup/ent/enttest/enttest.go b/backup/ent/enttest/enttest.go deleted file mode 100644 index 975ea37b9..000000000 --- a/backup/ent/enttest/enttest.go +++ /dev/null @@ -1,84 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package enttest - -import ( - "context" - - "github.com/Wei-Shaw/sub2api/backup/ent" - // required by schema hooks. - _ "github.com/Wei-Shaw/sub2api/backup/ent/runtime" - - "entgo.io/ent/dialect/sql/schema" - "github.com/Wei-Shaw/sub2api/backup/ent/migrate" -) - -type ( - // TestingT is the interface that is shared between - // testing.T and testing.B and used by enttest. - TestingT interface { - FailNow() - Error(...any) - } - - // Option configures client creation. 
- Option func(*options) - - options struct { - opts []ent.Option - migrateOpts []schema.MigrateOption - } -) - -// WithOptions forwards options to client creation. -func WithOptions(opts ...ent.Option) Option { - return func(o *options) { - o.opts = append(o.opts, opts...) - } -} - -// WithMigrateOptions forwards options to auto migration. -func WithMigrateOptions(opts ...schema.MigrateOption) Option { - return func(o *options) { - o.migrateOpts = append(o.migrateOpts, opts...) - } -} - -func newOptions(opts []Option) *options { - o := &options{} - for _, opt := range opts { - opt(o) - } - return o -} - -// Open calls ent.Open and auto-run migration. -func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client { - o := newOptions(opts) - c, err := ent.Open(driverName, dataSourceName, o.opts...) - if err != nil { - t.Error(err) - t.FailNow() - } - migrateSchema(t, c, o) - return c -} - -// NewClient calls ent.NewClient and auto-run migration. -func NewClient(t TestingT, opts ...Option) *ent.Client { - o := newOptions(opts) - c := ent.NewClient(o.opts...) - migrateSchema(t, c, o) - return c -} -func migrateSchema(t TestingT, c *ent.Client, o *options) { - tables, err := schema.CopyTables(migrate.Tables) - if err != nil { - t.Error(err) - t.FailNow() - } - if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { - t.Error(err) - t.FailNow() - } -} diff --git a/backup/ent/generate.go b/backup/ent/generate.go deleted file mode 100644 index 9fdb1068c..000000000 --- a/backup/ent/generate.go +++ /dev/null @@ -1,3 +0,0 @@ -package ent - -//go:generate go run entgo.io/ent/cmd/ent generate ./schema diff --git a/backup/ent/hook/hook.go b/backup/ent/hook/hook.go deleted file mode 100644 index 5db0dd794..000000000 --- a/backup/ent/hook/hook.go +++ /dev/null @@ -1,247 +0,0 @@ -// Code generated by ent, DO NOT EDIT. 
- -package hook - -import ( - "context" - "fmt" - - "github.com/Wei-Shaw/sub2api/backup/ent" -) - -// The BackupJobFunc type is an adapter to allow the use of ordinary -// function as BackupJob mutator. -type BackupJobFunc func(context.Context, *ent.BackupJobMutation) (ent.Value, error) - -// Mutate calls f(ctx, m). -func (f BackupJobFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - if mv, ok := m.(*ent.BackupJobMutation); ok { - return f(ctx, mv) - } - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BackupJobMutation", m) -} - -// The BackupJobEventFunc type is an adapter to allow the use of ordinary -// function as BackupJobEvent mutator. -type BackupJobEventFunc func(context.Context, *ent.BackupJobEventMutation) (ent.Value, error) - -// Mutate calls f(ctx, m). -func (f BackupJobEventFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - if mv, ok := m.(*ent.BackupJobEventMutation); ok { - return f(ctx, mv) - } - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BackupJobEventMutation", m) -} - -// The BackupS3ConfigFunc type is an adapter to allow the use of ordinary -// function as BackupS3Config mutator. -type BackupS3ConfigFunc func(context.Context, *ent.BackupS3ConfigMutation) (ent.Value, error) - -// Mutate calls f(ctx, m). -func (f BackupS3ConfigFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - if mv, ok := m.(*ent.BackupS3ConfigMutation); ok { - return f(ctx, mv) - } - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BackupS3ConfigMutation", m) -} - -// The BackupSettingFunc type is an adapter to allow the use of ordinary -// function as BackupSetting mutator. -type BackupSettingFunc func(context.Context, *ent.BackupSettingMutation) (ent.Value, error) - -// Mutate calls f(ctx, m). 
-func (f BackupSettingFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - if mv, ok := m.(*ent.BackupSettingMutation); ok { - return f(ctx, mv) - } - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BackupSettingMutation", m) -} - -// The BackupSourceConfigFunc type is an adapter to allow the use of ordinary -// function as BackupSourceConfig mutator. -type BackupSourceConfigFunc func(context.Context, *ent.BackupSourceConfigMutation) (ent.Value, error) - -// Mutate calls f(ctx, m). -func (f BackupSourceConfigFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { - if mv, ok := m.(*ent.BackupSourceConfigMutation); ok { - return f(ctx, mv) - } - return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.BackupSourceConfigMutation", m) -} - -// Condition is a hook condition function. -type Condition func(context.Context, ent.Mutation) bool - -// And groups conditions with the AND operator. -func And(first, second Condition, rest ...Condition) Condition { - return func(ctx context.Context, m ent.Mutation) bool { - if !first(ctx, m) || !second(ctx, m) { - return false - } - for _, cond := range rest { - if !cond(ctx, m) { - return false - } - } - return true - } -} - -// Or groups conditions with the OR operator. -func Or(first, second Condition, rest ...Condition) Condition { - return func(ctx context.Context, m ent.Mutation) bool { - if first(ctx, m) || second(ctx, m) { - return true - } - for _, cond := range rest { - if cond(ctx, m) { - return true - } - } - return false - } -} - -// Not negates a given condition. -func Not(cond Condition) Condition { - return func(ctx context.Context, m ent.Mutation) bool { - return !cond(ctx, m) - } -} - -// HasOp is a condition testing mutation operation. -func HasOp(op ent.Op) Condition { - return func(_ context.Context, m ent.Mutation) bool { - return m.Op().Is(op) - } -} - -// HasAddedFields is a condition validating `.AddedField` on fields. 
-func HasAddedFields(field string, fields ...string) Condition { - return func(_ context.Context, m ent.Mutation) bool { - if _, exists := m.AddedField(field); !exists { - return false - } - for _, field := range fields { - if _, exists := m.AddedField(field); !exists { - return false - } - } - return true - } -} - -// HasClearedFields is a condition validating `.FieldCleared` on fields. -func HasClearedFields(field string, fields ...string) Condition { - return func(_ context.Context, m ent.Mutation) bool { - if exists := m.FieldCleared(field); !exists { - return false - } - for _, field := range fields { - if exists := m.FieldCleared(field); !exists { - return false - } - } - return true - } -} - -// HasFields is a condition validating `.Field` on fields. -func HasFields(field string, fields ...string) Condition { - return func(_ context.Context, m ent.Mutation) bool { - if _, exists := m.Field(field); !exists { - return false - } - for _, field := range fields { - if _, exists := m.Field(field); !exists { - return false - } - } - return true - } -} - -// If executes the given hook under condition. -// -// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) -func If(hk ent.Hook, cond Condition) ent.Hook { - return func(next ent.Mutator) ent.Mutator { - return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { - if cond(ctx, m) { - return hk(next).Mutate(ctx, m) - } - return next.Mutate(ctx, m) - }) - } -} - -// On executes the given hook only for the given operation. -// -// hook.On(Log, ent.Delete|ent.Create) -func On(hk ent.Hook, op ent.Op) ent.Hook { - return If(hk, HasOp(op)) -} - -// Unless skips the given hook only for the given operation. -// -// hook.Unless(Log, ent.Update|ent.UpdateOne) -func Unless(hk ent.Hook, op ent.Op) ent.Hook { - return If(hk, Not(HasOp(op))) -} - -// FixedError is a hook returning a fixed error. 
-func FixedError(err error) ent.Hook { - return func(ent.Mutator) ent.Mutator { - return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) { - return nil, err - }) - } -} - -// Reject returns a hook that rejects all operations that match op. -// -// func (T) Hooks() []ent.Hook { -// return []ent.Hook{ -// Reject(ent.Delete|ent.Update), -// } -// } -func Reject(op ent.Op) ent.Hook { - hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) - return On(hk, op) -} - -// Chain acts as a list of hooks and is effectively immutable. -// Once created, it will always hold the same set of hooks in the same order. -type Chain struct { - hooks []ent.Hook -} - -// NewChain creates a new chain of hooks. -func NewChain(hooks ...ent.Hook) Chain { - return Chain{append([]ent.Hook(nil), hooks...)} -} - -// Hook chains the list of hooks and returns the final hook. -func (c Chain) Hook() ent.Hook { - return func(mutator ent.Mutator) ent.Mutator { - for i := len(c.hooks) - 1; i >= 0; i-- { - mutator = c.hooks[i](mutator) - } - return mutator - } -} - -// Append extends a chain, adding the specified hook -// as the last ones in the mutation flow. -func (c Chain) Append(hooks ...ent.Hook) Chain { - newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks)) - newHooks = append(newHooks, c.hooks...) - newHooks = append(newHooks, hooks...) - return Chain{newHooks} -} - -// Extend extends a chain, adding the specified chain -// as the last ones in the mutation flow. -func (c Chain) Extend(chain Chain) Chain { - return c.Append(chain.hooks...) -} diff --git a/backup/ent/migrate/migrate.go b/backup/ent/migrate/migrate.go deleted file mode 100644 index 1956a6bf6..000000000 --- a/backup/ent/migrate/migrate.go +++ /dev/null @@ -1,64 +0,0 @@ -// Code generated by ent, DO NOT EDIT. 
- -package migrate - -import ( - "context" - "fmt" - "io" - - "entgo.io/ent/dialect" - "entgo.io/ent/dialect/sql/schema" -) - -var ( - // WithGlobalUniqueID sets the universal ids options to the migration. - // If this option is enabled, ent migration will allocate a 1<<32 range - // for the ids of each entity (table). - // Note that this option cannot be applied on tables that already exist. - WithGlobalUniqueID = schema.WithGlobalUniqueID - // WithDropColumn sets the drop column option to the migration. - // If this option is enabled, ent migration will drop old columns - // that were used for both fields and edges. This defaults to false. - WithDropColumn = schema.WithDropColumn - // WithDropIndex sets the drop index option to the migration. - // If this option is enabled, ent migration will drop old indexes - // that were defined in the schema. This defaults to false. - // Note that unique constraints are defined using `UNIQUE INDEX`, - // and therefore, it's recommended to enable this option to get more - // flexibility in the schema changes. - WithDropIndex = schema.WithDropIndex - // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. - WithForeignKeys = schema.WithForeignKeys -) - -// Schema is the API for creating, migrating and dropping a schema. -type Schema struct { - drv dialect.Driver -} - -// NewSchema creates a new schema client. -func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } - -// Create creates all schema resources. -func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { - return Create(ctx, s, Tables, opts...) -} - -// Create creates all table resources using the given schema driver. -func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { - migrate, err := schema.NewMigrate(s.drv, opts...) - if err != nil { - return fmt.Errorf("ent/migrate: %w", err) - } - return migrate.Create(ctx, tables...) 
-} - -// WriteTo writes the schema changes to w instead of running them against the database. -// -// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { -// log.Fatal(err) -// } -func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { - return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) -} diff --git a/backup/ent/migrate/schema.go b/backup/ent/migrate/schema.go deleted file mode 100644 index 5d55772db..000000000 --- a/backup/ent/migrate/schema.go +++ /dev/null @@ -1,207 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package migrate - -import ( - "entgo.io/ent/dialect/sql/schema" - "entgo.io/ent/schema/field" -) - -var ( - // BackupJobsColumns holds the columns for the "backup_jobs" table. - BackupJobsColumns = []*schema.Column{ - {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "job_id", Type: field.TypeString, Unique: true}, - {Name: "backup_type", Type: field.TypeEnum, Enums: []string{"postgres", "redis", "full"}}, - {Name: "status", Type: field.TypeEnum, Enums: []string{"queued", "running", "succeeded", "failed", "partial_succeeded"}, Default: "queued"}, - {Name: "triggered_by", Type: field.TypeString, Default: "system"}, - {Name: "idempotency_key", Type: field.TypeString, Nullable: true}, - {Name: "upload_to_s3", Type: field.TypeBool, Default: false}, - {Name: "s3_profile_id", Type: field.TypeString, Nullable: true}, - {Name: "postgres_profile_id", Type: field.TypeString, Nullable: true}, - {Name: "redis_profile_id", Type: field.TypeString, Nullable: true}, - {Name: "started_at", Type: field.TypeTime, Nullable: true}, - {Name: "finished_at", Type: field.TypeTime, Nullable: true}, - {Name: "error_message", Type: field.TypeString, Nullable: true}, - {Name: "artifact_local_path", Type: field.TypeString, Nullable: true}, - {Name: "artifact_size_bytes", Type: field.TypeInt64, Nullable: true}, - {Name: "artifact_sha256", Type: 
field.TypeString, Nullable: true}, - {Name: "s3_bucket", Type: field.TypeString, Nullable: true}, - {Name: "s3_key", Type: field.TypeString, Nullable: true}, - {Name: "s3_etag", Type: field.TypeString, Nullable: true}, - {Name: "created_at", Type: field.TypeTime}, - {Name: "updated_at", Type: field.TypeTime}, - } - // BackupJobsTable holds the schema information for the "backup_jobs" table. - BackupJobsTable = &schema.Table{ - Name: "backup_jobs", - Columns: BackupJobsColumns, - PrimaryKey: []*schema.Column{BackupJobsColumns[0]}, - Indexes: []*schema.Index{ - { - Name: "backupjob_status_created_at", - Unique: false, - Columns: []*schema.Column{BackupJobsColumns[3], BackupJobsColumns[19]}, - }, - { - Name: "backupjob_backup_type_created_at", - Unique: false, - Columns: []*schema.Column{BackupJobsColumns[2], BackupJobsColumns[19]}, - }, - { - Name: "backupjob_idempotency_key", - Unique: false, - Columns: []*schema.Column{BackupJobsColumns[5]}, - }, - { - Name: "backupjob_s3_profile_id_status", - Unique: false, - Columns: []*schema.Column{BackupJobsColumns[7], BackupJobsColumns[3]}, - }, - { - Name: "backupjob_postgres_profile_id_status", - Unique: false, - Columns: []*schema.Column{BackupJobsColumns[8], BackupJobsColumns[3]}, - }, - { - Name: "backupjob_redis_profile_id_status", - Unique: false, - Columns: []*schema.Column{BackupJobsColumns[9], BackupJobsColumns[3]}, - }, - }, - } - // BackupJobEventsColumns holds the columns for the "backup_job_events" table. 
- BackupJobEventsColumns = []*schema.Column{ - {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "level", Type: field.TypeEnum, Enums: []string{"info", "warning", "error"}, Default: "info"}, - {Name: "event_type", Type: field.TypeString, Default: "state_change"}, - {Name: "message", Type: field.TypeString}, - {Name: "payload", Type: field.TypeString, Nullable: true}, - {Name: "event_time", Type: field.TypeTime}, - {Name: "created_at", Type: field.TypeTime}, - {Name: "backup_job_id", Type: field.TypeInt}, - } - // BackupJobEventsTable holds the schema information for the "backup_job_events" table. - BackupJobEventsTable = &schema.Table{ - Name: "backup_job_events", - Columns: BackupJobEventsColumns, - PrimaryKey: []*schema.Column{BackupJobEventsColumns[0]}, - ForeignKeys: []*schema.ForeignKey{ - { - Symbol: "backup_job_events_backup_jobs_job", - Columns: []*schema.Column{BackupJobEventsColumns[7]}, - RefColumns: []*schema.Column{BackupJobsColumns[0]}, - OnDelete: schema.NoAction, - }, - }, - Indexes: []*schema.Index{ - { - Name: "backupjobevent_backup_job_id_event_time", - Unique: false, - Columns: []*schema.Column{BackupJobEventsColumns[7], BackupJobEventsColumns[5]}, - }, - }, - } - // BackupS3configsColumns holds the columns for the "backup_s3configs" table. 
- BackupS3configsColumns = []*schema.Column{ - {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "profile_id", Type: field.TypeString, Default: "default"}, - {Name: "name", Type: field.TypeString, Default: "默认账号"}, - {Name: "is_active", Type: field.TypeBool, Default: false}, - {Name: "enabled", Type: field.TypeBool, Default: false}, - {Name: "endpoint", Type: field.TypeString, Default: ""}, - {Name: "region", Type: field.TypeString, Default: ""}, - {Name: "bucket", Type: field.TypeString, Default: ""}, - {Name: "access_key_id", Type: field.TypeString, Default: ""}, - {Name: "secret_access_key_encrypted", Type: field.TypeString, Nullable: true}, - {Name: "prefix", Type: field.TypeString, Default: ""}, - {Name: "force_path_style", Type: field.TypeBool, Default: false}, - {Name: "use_ssl", Type: field.TypeBool, Default: true}, - {Name: "created_at", Type: field.TypeTime}, - {Name: "updated_at", Type: field.TypeTime}, - } - // BackupS3configsTable holds the schema information for the "backup_s3configs" table. - BackupS3configsTable = &schema.Table{ - Name: "backup_s3configs", - Columns: BackupS3configsColumns, - PrimaryKey: []*schema.Column{BackupS3configsColumns[0]}, - Indexes: []*schema.Index{ - { - Name: "backups3config_profile_id", - Unique: true, - Columns: []*schema.Column{BackupS3configsColumns[1]}, - }, - { - Name: "backups3config_is_active", - Unique: false, - Columns: []*schema.Column{BackupS3configsColumns[3]}, - }, - }, - } - // BackupSettingsColumns holds the columns for the "backup_settings" table. 
- BackupSettingsColumns = []*schema.Column{ - {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "source_mode", Type: field.TypeEnum, Enums: []string{"direct", "docker_exec"}, Default: "direct"}, - {Name: "backup_root", Type: field.TypeString, Default: "/var/lib/sub2api/backups"}, - {Name: "retention_days", Type: field.TypeInt, Default: 7}, - {Name: "keep_last", Type: field.TypeInt, Default: 30}, - {Name: "sqlite_path", Type: field.TypeString, Default: "/var/lib/sub2api/backupd.db"}, - {Name: "created_at", Type: field.TypeTime}, - {Name: "updated_at", Type: field.TypeTime}, - } - // BackupSettingsTable holds the schema information for the "backup_settings" table. - BackupSettingsTable = &schema.Table{ - Name: "backup_settings", - Columns: BackupSettingsColumns, - PrimaryKey: []*schema.Column{BackupSettingsColumns[0]}, - } - // BackupSourceConfigsColumns holds the columns for the "backup_source_configs" table. - BackupSourceConfigsColumns = []*schema.Column{ - {Name: "id", Type: field.TypeInt, Increment: true}, - {Name: "source_type", Type: field.TypeEnum, Enums: []string{"postgres", "redis"}}, - {Name: "profile_id", Type: field.TypeString}, - {Name: "name", Type: field.TypeString}, - {Name: "is_active", Type: field.TypeBool, Default: false}, - {Name: "host", Type: field.TypeString, Nullable: true}, - {Name: "port", Type: field.TypeInt, Nullable: true}, - {Name: "username", Type: field.TypeString, Nullable: true}, - {Name: "password_encrypted", Type: field.TypeString, Nullable: true}, - {Name: "database", Type: field.TypeString, Nullable: true}, - {Name: "ssl_mode", Type: field.TypeString, Nullable: true}, - {Name: "addr", Type: field.TypeString, Nullable: true}, - {Name: "redis_db", Type: field.TypeInt, Nullable: true}, - {Name: "container_name", Type: field.TypeString, Default: ""}, - {Name: "created_at", Type: field.TypeTime}, - {Name: "updated_at", Type: field.TypeTime}, - } - // BackupSourceConfigsTable holds the schema information for the 
"backup_source_configs" table. - BackupSourceConfigsTable = &schema.Table{ - Name: "backup_source_configs", - Columns: BackupSourceConfigsColumns, - PrimaryKey: []*schema.Column{BackupSourceConfigsColumns[0]}, - Indexes: []*schema.Index{ - { - Name: "backupsourceconfig_source_type_profile_id", - Unique: true, - Columns: []*schema.Column{BackupSourceConfigsColumns[1], BackupSourceConfigsColumns[2]}, - }, - { - Name: "backupsourceconfig_source_type_is_active", - Unique: false, - Columns: []*schema.Column{BackupSourceConfigsColumns[1], BackupSourceConfigsColumns[4]}, - }, - }, - } - // Tables holds all the tables in the schema. - Tables = []*schema.Table{ - BackupJobsTable, - BackupJobEventsTable, - BackupS3configsTable, - BackupSettingsTable, - BackupSourceConfigsTable, - } -) - -func init() { - BackupJobEventsTable.ForeignKeys[0].RefTable = BackupJobsTable -} diff --git a/backup/ent/mutation.go b/backup/ent/mutation.go deleted file mode 100644 index e3f1fc0dc..000000000 --- a/backup/ent/mutation.go +++ /dev/null @@ -1,5587 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "errors" - "fmt" - "sync" - "time" - - "entgo.io/ent" - "entgo.io/ent/dialect/sql" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" - "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" - "github.com/Wei-Shaw/sub2api/backup/ent/predicate" -) - -const ( - // Operation types. - OpCreate = ent.OpCreate - OpDelete = ent.OpDelete - OpDeleteOne = ent.OpDeleteOne - OpUpdate = ent.OpUpdate - OpUpdateOne = ent.OpUpdateOne - - // Node types. 
- TypeBackupJob = "BackupJob" - TypeBackupJobEvent = "BackupJobEvent" - TypeBackupS3Config = "BackupS3Config" - TypeBackupSetting = "BackupSetting" - TypeBackupSourceConfig = "BackupSourceConfig" -) - -// BackupJobMutation represents an operation that mutates the BackupJob nodes in the graph. -type BackupJobMutation struct { - config - op Op - typ string - id *int - job_id *string - backup_type *backupjob.BackupType - status *backupjob.Status - triggered_by *string - idempotency_key *string - upload_to_s3 *bool - s3_profile_id *string - postgres_profile_id *string - redis_profile_id *string - started_at *time.Time - finished_at *time.Time - error_message *string - artifact_local_path *string - artifact_size_bytes *int64 - addartifact_size_bytes *int64 - artifact_sha256 *string - s3_bucket *string - s3_key *string - s3_etag *string - created_at *time.Time - updated_at *time.Time - clearedFields map[string]struct{} - events map[int]struct{} - removedevents map[int]struct{} - clearedevents bool - done bool - oldValue func(context.Context) (*BackupJob, error) - predicates []predicate.BackupJob -} - -var _ ent.Mutation = (*BackupJobMutation)(nil) - -// backupjobOption allows management of the mutation configuration using functional options. -type backupjobOption func(*BackupJobMutation) - -// newBackupJobMutation creates new mutation for the BackupJob entity. -func newBackupJobMutation(c config, op Op, opts ...backupjobOption) *BackupJobMutation { - m := &BackupJobMutation{ - config: c, - op: op, - typ: TypeBackupJob, - clearedFields: make(map[string]struct{}), - } - for _, opt := range opts { - opt(m) - } - return m -} - -// withBackupJobID sets the ID field of the mutation. 
-func withBackupJobID(id int) backupjobOption { - return func(m *BackupJobMutation) { - var ( - err error - once sync.Once - value *BackupJob - ) - m.oldValue = func(ctx context.Context) (*BackupJob, error) { - once.Do(func() { - if m.done { - err = errors.New("querying old values post mutation is not allowed") - } else { - value, err = m.Client().BackupJob.Get(ctx, id) - } - }) - return value, err - } - m.id = &id - } -} - -// withBackupJob sets the old BackupJob of the mutation. -func withBackupJob(node *BackupJob) backupjobOption { - return func(m *BackupJobMutation) { - m.oldValue = func(context.Context) (*BackupJob, error) { - return node, nil - } - m.id = &node.ID - } -} - -// Client returns a new `ent.Client` from the mutation. If the mutation was -// executed in a transaction (ent.Tx), a transactional client is returned. -func (m BackupJobMutation) Client() *Client { - client := &Client{config: m.config} - client.init() - return client -} - -// Tx returns an `ent.Tx` for mutations that were executed in transactions; -// it returns an error otherwise. -func (m BackupJobMutation) Tx() (*Tx, error) { - if _, ok := m.driver.(*txDriver); !ok { - return nil, errors.New("ent: mutation is not running in a transaction") - } - tx := &Tx{config: m.config} - tx.init() - return tx, nil -} - -// ID returns the ID value in the mutation. Note that the ID is only available -// if it was provided to the builder or after it was returned from the database. -func (m *BackupJobMutation) ID() (id int, exists bool) { - if m.id == nil { - return - } - return *m.id, true -} - -// IDs queries the database and returns the entity ids that match the mutation's predicate. -// That means, if the mutation is applied within a transaction with an isolation level such -// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated -// or updated by the mutation. 
-func (m *BackupJobMutation) IDs(ctx context.Context) ([]int, error) { - switch { - case m.op.Is(OpUpdateOne | OpDeleteOne): - id, exists := m.ID() - if exists { - return []int{id}, nil - } - fallthrough - case m.op.Is(OpUpdate | OpDelete): - return m.Client().BackupJob.Query().Where(m.predicates...).IDs(ctx) - default: - return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) - } -} - -// SetJobID sets the "job_id" field. -func (m *BackupJobMutation) SetJobID(s string) { - m.job_id = &s -} - -// JobID returns the value of the "job_id" field in the mutation. -func (m *BackupJobMutation) JobID() (r string, exists bool) { - v := m.job_id - if v == nil { - return - } - return *v, true -} - -// OldJobID returns the old "job_id" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobMutation) OldJobID(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldJobID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldJobID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldJobID: %w", err) - } - return oldValue.JobID, nil -} - -// ResetJobID resets all changes to the "job_id" field. -func (m *BackupJobMutation) ResetJobID() { - m.job_id = nil -} - -// SetBackupType sets the "backup_type" field. -func (m *BackupJobMutation) SetBackupType(bt backupjob.BackupType) { - m.backup_type = &bt -} - -// BackupType returns the value of the "backup_type" field in the mutation. 
-func (m *BackupJobMutation) BackupType() (r backupjob.BackupType, exists bool) { - v := m.backup_type - if v == nil { - return - } - return *v, true -} - -// OldBackupType returns the old "backup_type" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobMutation) OldBackupType(ctx context.Context) (v backupjob.BackupType, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldBackupType is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldBackupType requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldBackupType: %w", err) - } - return oldValue.BackupType, nil -} - -// ResetBackupType resets all changes to the "backup_type" field. -func (m *BackupJobMutation) ResetBackupType() { - m.backup_type = nil -} - -// SetStatus sets the "status" field. -func (m *BackupJobMutation) SetStatus(b backupjob.Status) { - m.status = &b -} - -// Status returns the value of the "status" field in the mutation. -func (m *BackupJobMutation) Status() (r backupjob.Status, exists bool) { - v := m.status - if v == nil { - return - } - return *v, true -} - -// OldStatus returns the old "status" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldStatus(ctx context.Context) (v backupjob.Status, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldStatus is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldStatus requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldStatus: %w", err) - } - return oldValue.Status, nil -} - -// ResetStatus resets all changes to the "status" field. -func (m *BackupJobMutation) ResetStatus() { - m.status = nil -} - -// SetTriggeredBy sets the "triggered_by" field. -func (m *BackupJobMutation) SetTriggeredBy(s string) { - m.triggered_by = &s -} - -// TriggeredBy returns the value of the "triggered_by" field in the mutation. -func (m *BackupJobMutation) TriggeredBy() (r string, exists bool) { - v := m.triggered_by - if v == nil { - return - } - return *v, true -} - -// OldTriggeredBy returns the old "triggered_by" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobMutation) OldTriggeredBy(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldTriggeredBy is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldTriggeredBy requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldTriggeredBy: %w", err) - } - return oldValue.TriggeredBy, nil -} - -// ResetTriggeredBy resets all changes to the "triggered_by" field. -func (m *BackupJobMutation) ResetTriggeredBy() { - m.triggered_by = nil -} - -// SetIdempotencyKey sets the "idempotency_key" field. 
-func (m *BackupJobMutation) SetIdempotencyKey(s string) { - m.idempotency_key = &s -} - -// IdempotencyKey returns the value of the "idempotency_key" field in the mutation. -func (m *BackupJobMutation) IdempotencyKey() (r string, exists bool) { - v := m.idempotency_key - if v == nil { - return - } - return *v, true -} - -// OldIdempotencyKey returns the old "idempotency_key" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobMutation) OldIdempotencyKey(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldIdempotencyKey is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldIdempotencyKey requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldIdempotencyKey: %w", err) - } - return oldValue.IdempotencyKey, nil -} - -// ClearIdempotencyKey clears the value of the "idempotency_key" field. -func (m *BackupJobMutation) ClearIdempotencyKey() { - m.idempotency_key = nil - m.clearedFields[backupjob.FieldIdempotencyKey] = struct{}{} -} - -// IdempotencyKeyCleared returns if the "idempotency_key" field was cleared in this mutation. -func (m *BackupJobMutation) IdempotencyKeyCleared() bool { - _, ok := m.clearedFields[backupjob.FieldIdempotencyKey] - return ok -} - -// ResetIdempotencyKey resets all changes to the "idempotency_key" field. -func (m *BackupJobMutation) ResetIdempotencyKey() { - m.idempotency_key = nil - delete(m.clearedFields, backupjob.FieldIdempotencyKey) -} - -// SetUploadToS3 sets the "upload_to_s3" field. -func (m *BackupJobMutation) SetUploadToS3(b bool) { - m.upload_to_s3 = &b -} - -// UploadToS3 returns the value of the "upload_to_s3" field in the mutation. 
-func (m *BackupJobMutation) UploadToS3() (r bool, exists bool) { - v := m.upload_to_s3 - if v == nil { - return - } - return *v, true -} - -// OldUploadToS3 returns the old "upload_to_s3" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobMutation) OldUploadToS3(ctx context.Context) (v bool, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUploadToS3 is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUploadToS3 requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldUploadToS3: %w", err) - } - return oldValue.UploadToS3, nil -} - -// ResetUploadToS3 resets all changes to the "upload_to_s3" field. -func (m *BackupJobMutation) ResetUploadToS3() { - m.upload_to_s3 = nil -} - -// SetS3ProfileID sets the "s3_profile_id" field. -func (m *BackupJobMutation) SetS3ProfileID(s string) { - m.s3_profile_id = &s -} - -// S3ProfileID returns the value of the "s3_profile_id" field in the mutation. -func (m *BackupJobMutation) S3ProfileID() (r string, exists bool) { - v := m.s3_profile_id - if v == nil { - return - } - return *v, true -} - -// OldS3ProfileID returns the old "s3_profile_id" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldS3ProfileID(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldS3ProfileID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldS3ProfileID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldS3ProfileID: %w", err) - } - return oldValue.S3ProfileID, nil -} - -// ClearS3ProfileID clears the value of the "s3_profile_id" field. -func (m *BackupJobMutation) ClearS3ProfileID() { - m.s3_profile_id = nil - m.clearedFields[backupjob.FieldS3ProfileID] = struct{}{} -} - -// S3ProfileIDCleared returns if the "s3_profile_id" field was cleared in this mutation. -func (m *BackupJobMutation) S3ProfileIDCleared() bool { - _, ok := m.clearedFields[backupjob.FieldS3ProfileID] - return ok -} - -// ResetS3ProfileID resets all changes to the "s3_profile_id" field. -func (m *BackupJobMutation) ResetS3ProfileID() { - m.s3_profile_id = nil - delete(m.clearedFields, backupjob.FieldS3ProfileID) -} - -// SetPostgresProfileID sets the "postgres_profile_id" field. -func (m *BackupJobMutation) SetPostgresProfileID(s string) { - m.postgres_profile_id = &s -} - -// PostgresProfileID returns the value of the "postgres_profile_id" field in the mutation. -func (m *BackupJobMutation) PostgresProfileID() (r string, exists bool) { - v := m.postgres_profile_id - if v == nil { - return - } - return *v, true -} - -// OldPostgresProfileID returns the old "postgres_profile_id" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldPostgresProfileID(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPostgresProfileID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPostgresProfileID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldPostgresProfileID: %w", err) - } - return oldValue.PostgresProfileID, nil -} - -// ClearPostgresProfileID clears the value of the "postgres_profile_id" field. -func (m *BackupJobMutation) ClearPostgresProfileID() { - m.postgres_profile_id = nil - m.clearedFields[backupjob.FieldPostgresProfileID] = struct{}{} -} - -// PostgresProfileIDCleared returns if the "postgres_profile_id" field was cleared in this mutation. -func (m *BackupJobMutation) PostgresProfileIDCleared() bool { - _, ok := m.clearedFields[backupjob.FieldPostgresProfileID] - return ok -} - -// ResetPostgresProfileID resets all changes to the "postgres_profile_id" field. -func (m *BackupJobMutation) ResetPostgresProfileID() { - m.postgres_profile_id = nil - delete(m.clearedFields, backupjob.FieldPostgresProfileID) -} - -// SetRedisProfileID sets the "redis_profile_id" field. -func (m *BackupJobMutation) SetRedisProfileID(s string) { - m.redis_profile_id = &s -} - -// RedisProfileID returns the value of the "redis_profile_id" field in the mutation. -func (m *BackupJobMutation) RedisProfileID() (r string, exists bool) { - v := m.redis_profile_id - if v == nil { - return - } - return *v, true -} - -// OldRedisProfileID returns the old "redis_profile_id" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldRedisProfileID(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldRedisProfileID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldRedisProfileID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldRedisProfileID: %w", err) - } - return oldValue.RedisProfileID, nil -} - -// ClearRedisProfileID clears the value of the "redis_profile_id" field. -func (m *BackupJobMutation) ClearRedisProfileID() { - m.redis_profile_id = nil - m.clearedFields[backupjob.FieldRedisProfileID] = struct{}{} -} - -// RedisProfileIDCleared returns if the "redis_profile_id" field was cleared in this mutation. -func (m *BackupJobMutation) RedisProfileIDCleared() bool { - _, ok := m.clearedFields[backupjob.FieldRedisProfileID] - return ok -} - -// ResetRedisProfileID resets all changes to the "redis_profile_id" field. -func (m *BackupJobMutation) ResetRedisProfileID() { - m.redis_profile_id = nil - delete(m.clearedFields, backupjob.FieldRedisProfileID) -} - -// SetStartedAt sets the "started_at" field. -func (m *BackupJobMutation) SetStartedAt(t time.Time) { - m.started_at = &t -} - -// StartedAt returns the value of the "started_at" field in the mutation. -func (m *BackupJobMutation) StartedAt() (r time.Time, exists bool) { - v := m.started_at - if v == nil { - return - } - return *v, true -} - -// OldStartedAt returns the old "started_at" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldStartedAt(ctx context.Context) (v *time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldStartedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldStartedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldStartedAt: %w", err) - } - return oldValue.StartedAt, nil -} - -// ClearStartedAt clears the value of the "started_at" field. -func (m *BackupJobMutation) ClearStartedAt() { - m.started_at = nil - m.clearedFields[backupjob.FieldStartedAt] = struct{}{} -} - -// StartedAtCleared returns if the "started_at" field was cleared in this mutation. -func (m *BackupJobMutation) StartedAtCleared() bool { - _, ok := m.clearedFields[backupjob.FieldStartedAt] - return ok -} - -// ResetStartedAt resets all changes to the "started_at" field. -func (m *BackupJobMutation) ResetStartedAt() { - m.started_at = nil - delete(m.clearedFields, backupjob.FieldStartedAt) -} - -// SetFinishedAt sets the "finished_at" field. -func (m *BackupJobMutation) SetFinishedAt(t time.Time) { - m.finished_at = &t -} - -// FinishedAt returns the value of the "finished_at" field in the mutation. -func (m *BackupJobMutation) FinishedAt() (r time.Time, exists bool) { - v := m.finished_at - if v == nil { - return - } - return *v, true -} - -// OldFinishedAt returns the old "finished_at" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldFinishedAt(ctx context.Context) (v *time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldFinishedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldFinishedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldFinishedAt: %w", err) - } - return oldValue.FinishedAt, nil -} - -// ClearFinishedAt clears the value of the "finished_at" field. -func (m *BackupJobMutation) ClearFinishedAt() { - m.finished_at = nil - m.clearedFields[backupjob.FieldFinishedAt] = struct{}{} -} - -// FinishedAtCleared returns if the "finished_at" field was cleared in this mutation. -func (m *BackupJobMutation) FinishedAtCleared() bool { - _, ok := m.clearedFields[backupjob.FieldFinishedAt] - return ok -} - -// ResetFinishedAt resets all changes to the "finished_at" field. -func (m *BackupJobMutation) ResetFinishedAt() { - m.finished_at = nil - delete(m.clearedFields, backupjob.FieldFinishedAt) -} - -// SetErrorMessage sets the "error_message" field. -func (m *BackupJobMutation) SetErrorMessage(s string) { - m.error_message = &s -} - -// ErrorMessage returns the value of the "error_message" field in the mutation. -func (m *BackupJobMutation) ErrorMessage() (r string, exists bool) { - v := m.error_message - if v == nil { - return - } - return *v, true -} - -// OldErrorMessage returns the old "error_message" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldErrorMessage(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldErrorMessage is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldErrorMessage requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldErrorMessage: %w", err) - } - return oldValue.ErrorMessage, nil -} - -// ClearErrorMessage clears the value of the "error_message" field. -func (m *BackupJobMutation) ClearErrorMessage() { - m.error_message = nil - m.clearedFields[backupjob.FieldErrorMessage] = struct{}{} -} - -// ErrorMessageCleared returns if the "error_message" field was cleared in this mutation. -func (m *BackupJobMutation) ErrorMessageCleared() bool { - _, ok := m.clearedFields[backupjob.FieldErrorMessage] - return ok -} - -// ResetErrorMessage resets all changes to the "error_message" field. -func (m *BackupJobMutation) ResetErrorMessage() { - m.error_message = nil - delete(m.clearedFields, backupjob.FieldErrorMessage) -} - -// SetArtifactLocalPath sets the "artifact_local_path" field. -func (m *BackupJobMutation) SetArtifactLocalPath(s string) { - m.artifact_local_path = &s -} - -// ArtifactLocalPath returns the value of the "artifact_local_path" field in the mutation. -func (m *BackupJobMutation) ArtifactLocalPath() (r string, exists bool) { - v := m.artifact_local_path - if v == nil { - return - } - return *v, true -} - -// OldArtifactLocalPath returns the old "artifact_local_path" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldArtifactLocalPath(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldArtifactLocalPath is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldArtifactLocalPath requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldArtifactLocalPath: %w", err) - } - return oldValue.ArtifactLocalPath, nil -} - -// ClearArtifactLocalPath clears the value of the "artifact_local_path" field. -func (m *BackupJobMutation) ClearArtifactLocalPath() { - m.artifact_local_path = nil - m.clearedFields[backupjob.FieldArtifactLocalPath] = struct{}{} -} - -// ArtifactLocalPathCleared returns if the "artifact_local_path" field was cleared in this mutation. -func (m *BackupJobMutation) ArtifactLocalPathCleared() bool { - _, ok := m.clearedFields[backupjob.FieldArtifactLocalPath] - return ok -} - -// ResetArtifactLocalPath resets all changes to the "artifact_local_path" field. -func (m *BackupJobMutation) ResetArtifactLocalPath() { - m.artifact_local_path = nil - delete(m.clearedFields, backupjob.FieldArtifactLocalPath) -} - -// SetArtifactSizeBytes sets the "artifact_size_bytes" field. -func (m *BackupJobMutation) SetArtifactSizeBytes(i int64) { - m.artifact_size_bytes = &i - m.addartifact_size_bytes = nil -} - -// ArtifactSizeBytes returns the value of the "artifact_size_bytes" field in the mutation. -func (m *BackupJobMutation) ArtifactSizeBytes() (r int64, exists bool) { - v := m.artifact_size_bytes - if v == nil { - return - } - return *v, true -} - -// OldArtifactSizeBytes returns the old "artifact_size_bytes" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldArtifactSizeBytes(ctx context.Context) (v *int64, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldArtifactSizeBytes is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldArtifactSizeBytes requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldArtifactSizeBytes: %w", err) - } - return oldValue.ArtifactSizeBytes, nil -} - -// AddArtifactSizeBytes adds i to the "artifact_size_bytes" field. -func (m *BackupJobMutation) AddArtifactSizeBytes(i int64) { - if m.addartifact_size_bytes != nil { - *m.addartifact_size_bytes += i - } else { - m.addartifact_size_bytes = &i - } -} - -// AddedArtifactSizeBytes returns the value that was added to the "artifact_size_bytes" field in this mutation. -func (m *BackupJobMutation) AddedArtifactSizeBytes() (r int64, exists bool) { - v := m.addartifact_size_bytes - if v == nil { - return - } - return *v, true -} - -// ClearArtifactSizeBytes clears the value of the "artifact_size_bytes" field. -func (m *BackupJobMutation) ClearArtifactSizeBytes() { - m.artifact_size_bytes = nil - m.addartifact_size_bytes = nil - m.clearedFields[backupjob.FieldArtifactSizeBytes] = struct{}{} -} - -// ArtifactSizeBytesCleared returns if the "artifact_size_bytes" field was cleared in this mutation. -func (m *BackupJobMutation) ArtifactSizeBytesCleared() bool { - _, ok := m.clearedFields[backupjob.FieldArtifactSizeBytes] - return ok -} - -// ResetArtifactSizeBytes resets all changes to the "artifact_size_bytes" field. -func (m *BackupJobMutation) ResetArtifactSizeBytes() { - m.artifact_size_bytes = nil - m.addartifact_size_bytes = nil - delete(m.clearedFields, backupjob.FieldArtifactSizeBytes) -} - -// SetArtifactSha256 sets the "artifact_sha256" field. 
-func (m *BackupJobMutation) SetArtifactSha256(s string) { - m.artifact_sha256 = &s -} - -// ArtifactSha256 returns the value of the "artifact_sha256" field in the mutation. -func (m *BackupJobMutation) ArtifactSha256() (r string, exists bool) { - v := m.artifact_sha256 - if v == nil { - return - } - return *v, true -} - -// OldArtifactSha256 returns the old "artifact_sha256" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobMutation) OldArtifactSha256(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldArtifactSha256 is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldArtifactSha256 requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldArtifactSha256: %w", err) - } - return oldValue.ArtifactSha256, nil -} - -// ClearArtifactSha256 clears the value of the "artifact_sha256" field. -func (m *BackupJobMutation) ClearArtifactSha256() { - m.artifact_sha256 = nil - m.clearedFields[backupjob.FieldArtifactSha256] = struct{}{} -} - -// ArtifactSha256Cleared returns if the "artifact_sha256" field was cleared in this mutation. -func (m *BackupJobMutation) ArtifactSha256Cleared() bool { - _, ok := m.clearedFields[backupjob.FieldArtifactSha256] - return ok -} - -// ResetArtifactSha256 resets all changes to the "artifact_sha256" field. -func (m *BackupJobMutation) ResetArtifactSha256() { - m.artifact_sha256 = nil - delete(m.clearedFields, backupjob.FieldArtifactSha256) -} - -// SetS3Bucket sets the "s3_bucket" field. -func (m *BackupJobMutation) SetS3Bucket(s string) { - m.s3_bucket = &s -} - -// S3Bucket returns the value of the "s3_bucket" field in the mutation. 
-func (m *BackupJobMutation) S3Bucket() (r string, exists bool) { - v := m.s3_bucket - if v == nil { - return - } - return *v, true -} - -// OldS3Bucket returns the old "s3_bucket" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobMutation) OldS3Bucket(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldS3Bucket is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldS3Bucket requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldS3Bucket: %w", err) - } - return oldValue.S3Bucket, nil -} - -// ClearS3Bucket clears the value of the "s3_bucket" field. -func (m *BackupJobMutation) ClearS3Bucket() { - m.s3_bucket = nil - m.clearedFields[backupjob.FieldS3Bucket] = struct{}{} -} - -// S3BucketCleared returns if the "s3_bucket" field was cleared in this mutation. -func (m *BackupJobMutation) S3BucketCleared() bool { - _, ok := m.clearedFields[backupjob.FieldS3Bucket] - return ok -} - -// ResetS3Bucket resets all changes to the "s3_bucket" field. -func (m *BackupJobMutation) ResetS3Bucket() { - m.s3_bucket = nil - delete(m.clearedFields, backupjob.FieldS3Bucket) -} - -// SetS3Key sets the "s3_key" field. -func (m *BackupJobMutation) SetS3Key(s string) { - m.s3_key = &s -} - -// S3Key returns the value of the "s3_key" field in the mutation. -func (m *BackupJobMutation) S3Key() (r string, exists bool) { - v := m.s3_key - if v == nil { - return - } - return *v, true -} - -// OldS3Key returns the old "s3_key" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. 
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobMutation) OldS3Key(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldS3Key is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldS3Key requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldS3Key: %w", err) - } - return oldValue.S3Key, nil -} - -// ClearS3Key clears the value of the "s3_key" field. -func (m *BackupJobMutation) ClearS3Key() { - m.s3_key = nil - m.clearedFields[backupjob.FieldS3Key] = struct{}{} -} - -// S3KeyCleared returns if the "s3_key" field was cleared in this mutation. -func (m *BackupJobMutation) S3KeyCleared() bool { - _, ok := m.clearedFields[backupjob.FieldS3Key] - return ok -} - -// ResetS3Key resets all changes to the "s3_key" field. -func (m *BackupJobMutation) ResetS3Key() { - m.s3_key = nil - delete(m.clearedFields, backupjob.FieldS3Key) -} - -// SetS3Etag sets the "s3_etag" field. -func (m *BackupJobMutation) SetS3Etag(s string) { - m.s3_etag = &s -} - -// S3Etag returns the value of the "s3_etag" field in the mutation. -func (m *BackupJobMutation) S3Etag() (r string, exists bool) { - v := m.s3_etag - if v == nil { - return - } - return *v, true -} - -// OldS3Etag returns the old "s3_etag" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldS3Etag(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldS3Etag is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldS3Etag requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldS3Etag: %w", err) - } - return oldValue.S3Etag, nil -} - -// ClearS3Etag clears the value of the "s3_etag" field. -func (m *BackupJobMutation) ClearS3Etag() { - m.s3_etag = nil - m.clearedFields[backupjob.FieldS3Etag] = struct{}{} -} - -// S3EtagCleared returns if the "s3_etag" field was cleared in this mutation. -func (m *BackupJobMutation) S3EtagCleared() bool { - _, ok := m.clearedFields[backupjob.FieldS3Etag] - return ok -} - -// ResetS3Etag resets all changes to the "s3_etag" field. -func (m *BackupJobMutation) ResetS3Etag() { - m.s3_etag = nil - delete(m.clearedFields, backupjob.FieldS3Etag) -} - -// SetCreatedAt sets the "created_at" field. -func (m *BackupJobMutation) SetCreatedAt(t time.Time) { - m.created_at = &t -} - -// CreatedAt returns the value of the "created_at" field in the mutation. -func (m *BackupJobMutation) CreatedAt() (r time.Time, exists bool) { - v := m.created_at - if v == nil { - return - } - return *v, true -} - -// OldCreatedAt returns the old "created_at" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCreatedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) - } - return oldValue.CreatedAt, nil -} - -// ResetCreatedAt resets all changes to the "created_at" field. -func (m *BackupJobMutation) ResetCreatedAt() { - m.created_at = nil -} - -// SetUpdatedAt sets the "updated_at" field. -func (m *BackupJobMutation) SetUpdatedAt(t time.Time) { - m.updated_at = &t -} - -// UpdatedAt returns the value of the "updated_at" field in the mutation. -func (m *BackupJobMutation) UpdatedAt() (r time.Time, exists bool) { - v := m.updated_at - if v == nil { - return - } - return *v, true -} - -// OldUpdatedAt returns the old "updated_at" field's value of the BackupJob entity. -// If the BackupJob object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUpdatedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) - } - return oldValue.UpdatedAt, nil -} - -// ResetUpdatedAt resets all changes to the "updated_at" field. -func (m *BackupJobMutation) ResetUpdatedAt() { - m.updated_at = nil -} - -// AddEventIDs adds the "events" edge to the BackupJobEvent entity by ids. 
-func (m *BackupJobMutation) AddEventIDs(ids ...int) { - if m.events == nil { - m.events = make(map[int]struct{}) - } - for i := range ids { - m.events[ids[i]] = struct{}{} - } -} - -// ClearEvents clears the "events" edge to the BackupJobEvent entity. -func (m *BackupJobMutation) ClearEvents() { - m.clearedevents = true -} - -// EventsCleared reports if the "events" edge to the BackupJobEvent entity was cleared. -func (m *BackupJobMutation) EventsCleared() bool { - return m.clearedevents -} - -// RemoveEventIDs removes the "events" edge to the BackupJobEvent entity by IDs. -func (m *BackupJobMutation) RemoveEventIDs(ids ...int) { - if m.removedevents == nil { - m.removedevents = make(map[int]struct{}) - } - for i := range ids { - delete(m.events, ids[i]) - m.removedevents[ids[i]] = struct{}{} - } -} - -// RemovedEvents returns the removed IDs of the "events" edge to the BackupJobEvent entity. -func (m *BackupJobMutation) RemovedEventsIDs() (ids []int) { - for id := range m.removedevents { - ids = append(ids, id) - } - return -} - -// EventsIDs returns the "events" edge IDs in the mutation. -func (m *BackupJobMutation) EventsIDs() (ids []int) { - for id := range m.events { - ids = append(ids, id) - } - return -} - -// ResetEvents resets all changes to the "events" edge. -func (m *BackupJobMutation) ResetEvents() { - m.events = nil - m.clearedevents = false - m.removedevents = nil -} - -// Where appends a list predicates to the BackupJobMutation builder. -func (m *BackupJobMutation) Where(ps ...predicate.BackupJob) { - m.predicates = append(m.predicates, ps...) -} - -// WhereP appends storage-level predicates to the BackupJobMutation builder. Using this method, -// users can use type-assertion to append predicates that do not depend on any generated package. -func (m *BackupJobMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.BackupJob, len(ps)) - for i := range ps { - p[i] = ps[i] - } - m.Where(p...) -} - -// Op returns the operation name. 
-func (m *BackupJobMutation) Op() Op { - return m.op -} - -// SetOp allows setting the mutation operation. -func (m *BackupJobMutation) SetOp(op Op) { - m.op = op -} - -// Type returns the node type of this mutation (BackupJob). -func (m *BackupJobMutation) Type() string { - return m.typ -} - -// Fields returns all fields that were changed during this mutation. Note that in -// order to get all numeric fields that were incremented/decremented, call -// AddedFields(). -func (m *BackupJobMutation) Fields() []string { - fields := make([]string, 0, 20) - if m.job_id != nil { - fields = append(fields, backupjob.FieldJobID) - } - if m.backup_type != nil { - fields = append(fields, backupjob.FieldBackupType) - } - if m.status != nil { - fields = append(fields, backupjob.FieldStatus) - } - if m.triggered_by != nil { - fields = append(fields, backupjob.FieldTriggeredBy) - } - if m.idempotency_key != nil { - fields = append(fields, backupjob.FieldIdempotencyKey) - } - if m.upload_to_s3 != nil { - fields = append(fields, backupjob.FieldUploadToS3) - } - if m.s3_profile_id != nil { - fields = append(fields, backupjob.FieldS3ProfileID) - } - if m.postgres_profile_id != nil { - fields = append(fields, backupjob.FieldPostgresProfileID) - } - if m.redis_profile_id != nil { - fields = append(fields, backupjob.FieldRedisProfileID) - } - if m.started_at != nil { - fields = append(fields, backupjob.FieldStartedAt) - } - if m.finished_at != nil { - fields = append(fields, backupjob.FieldFinishedAt) - } - if m.error_message != nil { - fields = append(fields, backupjob.FieldErrorMessage) - } - if m.artifact_local_path != nil { - fields = append(fields, backupjob.FieldArtifactLocalPath) - } - if m.artifact_size_bytes != nil { - fields = append(fields, backupjob.FieldArtifactSizeBytes) - } - if m.artifact_sha256 != nil { - fields = append(fields, backupjob.FieldArtifactSha256) - } - if m.s3_bucket != nil { - fields = append(fields, backupjob.FieldS3Bucket) - } - if m.s3_key != nil { - 
fields = append(fields, backupjob.FieldS3Key) - } - if m.s3_etag != nil { - fields = append(fields, backupjob.FieldS3Etag) - } - if m.created_at != nil { - fields = append(fields, backupjob.FieldCreatedAt) - } - if m.updated_at != nil { - fields = append(fields, backupjob.FieldUpdatedAt) - } - return fields -} - -// Field returns the value of a field with the given name. The second boolean -// return value indicates that this field was not set, or was not defined in the -// schema. -func (m *BackupJobMutation) Field(name string) (ent.Value, bool) { - switch name { - case backupjob.FieldJobID: - return m.JobID() - case backupjob.FieldBackupType: - return m.BackupType() - case backupjob.FieldStatus: - return m.Status() - case backupjob.FieldTriggeredBy: - return m.TriggeredBy() - case backupjob.FieldIdempotencyKey: - return m.IdempotencyKey() - case backupjob.FieldUploadToS3: - return m.UploadToS3() - case backupjob.FieldS3ProfileID: - return m.S3ProfileID() - case backupjob.FieldPostgresProfileID: - return m.PostgresProfileID() - case backupjob.FieldRedisProfileID: - return m.RedisProfileID() - case backupjob.FieldStartedAt: - return m.StartedAt() - case backupjob.FieldFinishedAt: - return m.FinishedAt() - case backupjob.FieldErrorMessage: - return m.ErrorMessage() - case backupjob.FieldArtifactLocalPath: - return m.ArtifactLocalPath() - case backupjob.FieldArtifactSizeBytes: - return m.ArtifactSizeBytes() - case backupjob.FieldArtifactSha256: - return m.ArtifactSha256() - case backupjob.FieldS3Bucket: - return m.S3Bucket() - case backupjob.FieldS3Key: - return m.S3Key() - case backupjob.FieldS3Etag: - return m.S3Etag() - case backupjob.FieldCreatedAt: - return m.CreatedAt() - case backupjob.FieldUpdatedAt: - return m.UpdatedAt() - } - return nil, false -} - -// OldField returns the old value of the field from the database. An error is -// returned if the mutation operation is not UpdateOne, or the query to the -// database failed. 
-func (m *BackupJobMutation) OldField(ctx context.Context, name string) (ent.Value, error) { - switch name { - case backupjob.FieldJobID: - return m.OldJobID(ctx) - case backupjob.FieldBackupType: - return m.OldBackupType(ctx) - case backupjob.FieldStatus: - return m.OldStatus(ctx) - case backupjob.FieldTriggeredBy: - return m.OldTriggeredBy(ctx) - case backupjob.FieldIdempotencyKey: - return m.OldIdempotencyKey(ctx) - case backupjob.FieldUploadToS3: - return m.OldUploadToS3(ctx) - case backupjob.FieldS3ProfileID: - return m.OldS3ProfileID(ctx) - case backupjob.FieldPostgresProfileID: - return m.OldPostgresProfileID(ctx) - case backupjob.FieldRedisProfileID: - return m.OldRedisProfileID(ctx) - case backupjob.FieldStartedAt: - return m.OldStartedAt(ctx) - case backupjob.FieldFinishedAt: - return m.OldFinishedAt(ctx) - case backupjob.FieldErrorMessage: - return m.OldErrorMessage(ctx) - case backupjob.FieldArtifactLocalPath: - return m.OldArtifactLocalPath(ctx) - case backupjob.FieldArtifactSizeBytes: - return m.OldArtifactSizeBytes(ctx) - case backupjob.FieldArtifactSha256: - return m.OldArtifactSha256(ctx) - case backupjob.FieldS3Bucket: - return m.OldS3Bucket(ctx) - case backupjob.FieldS3Key: - return m.OldS3Key(ctx) - case backupjob.FieldS3Etag: - return m.OldS3Etag(ctx) - case backupjob.FieldCreatedAt: - return m.OldCreatedAt(ctx) - case backupjob.FieldUpdatedAt: - return m.OldUpdatedAt(ctx) - } - return nil, fmt.Errorf("unknown BackupJob field %s", name) -} - -// SetField sets the value of a field with the given name. It returns an error if -// the field is not defined in the schema, or if the type mismatched the field -// type. 
-func (m *BackupJobMutation) SetField(name string, value ent.Value) error { - switch name { - case backupjob.FieldJobID: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetJobID(v) - return nil - case backupjob.FieldBackupType: - v, ok := value.(backupjob.BackupType) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetBackupType(v) - return nil - case backupjob.FieldStatus: - v, ok := value.(backupjob.Status) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetStatus(v) - return nil - case backupjob.FieldTriggeredBy: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetTriggeredBy(v) - return nil - case backupjob.FieldIdempotencyKey: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetIdempotencyKey(v) - return nil - case backupjob.FieldUploadToS3: - v, ok := value.(bool) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetUploadToS3(v) - return nil - case backupjob.FieldS3ProfileID: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetS3ProfileID(v) - return nil - case backupjob.FieldPostgresProfileID: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetPostgresProfileID(v) - return nil - case backupjob.FieldRedisProfileID: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetRedisProfileID(v) - return nil - case backupjob.FieldStartedAt: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetStartedAt(v) - return nil - case backupjob.FieldFinishedAt: - v, ok := value.(time.Time) - if !ok { - return 
fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetFinishedAt(v) - return nil - case backupjob.FieldErrorMessage: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetErrorMessage(v) - return nil - case backupjob.FieldArtifactLocalPath: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetArtifactLocalPath(v) - return nil - case backupjob.FieldArtifactSizeBytes: - v, ok := value.(int64) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetArtifactSizeBytes(v) - return nil - case backupjob.FieldArtifactSha256: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetArtifactSha256(v) - return nil - case backupjob.FieldS3Bucket: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetS3Bucket(v) - return nil - case backupjob.FieldS3Key: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetS3Key(v) - return nil - case backupjob.FieldS3Etag: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetS3Etag(v) - return nil - case backupjob.FieldCreatedAt: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetCreatedAt(v) - return nil - case backupjob.FieldUpdatedAt: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetUpdatedAt(v) - return nil - } - return fmt.Errorf("unknown BackupJob field %s", name) -} - -// AddedFields returns all numeric fields that were incremented/decremented during -// this mutation. 
-func (m *BackupJobMutation) AddedFields() []string { - var fields []string - if m.addartifact_size_bytes != nil { - fields = append(fields, backupjob.FieldArtifactSizeBytes) - } - return fields -} - -// AddedField returns the numeric value that was incremented/decremented on a field -// with the given name. The second boolean return value indicates that this field -// was not set, or was not defined in the schema. -func (m *BackupJobMutation) AddedField(name string) (ent.Value, bool) { - switch name { - case backupjob.FieldArtifactSizeBytes: - return m.AddedArtifactSizeBytes() - } - return nil, false -} - -// AddField adds the value to the field with the given name. It returns an error if -// the field is not defined in the schema, or if the type mismatched the field -// type. -func (m *BackupJobMutation) AddField(name string, value ent.Value) error { - switch name { - case backupjob.FieldArtifactSizeBytes: - v, ok := value.(int64) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.AddArtifactSizeBytes(v) - return nil - } - return fmt.Errorf("unknown BackupJob numeric field %s", name) -} - -// ClearedFields returns all nullable fields that were cleared during this -// mutation. 
-func (m *BackupJobMutation) ClearedFields() []string { - var fields []string - if m.FieldCleared(backupjob.FieldIdempotencyKey) { - fields = append(fields, backupjob.FieldIdempotencyKey) - } - if m.FieldCleared(backupjob.FieldS3ProfileID) { - fields = append(fields, backupjob.FieldS3ProfileID) - } - if m.FieldCleared(backupjob.FieldPostgresProfileID) { - fields = append(fields, backupjob.FieldPostgresProfileID) - } - if m.FieldCleared(backupjob.FieldRedisProfileID) { - fields = append(fields, backupjob.FieldRedisProfileID) - } - if m.FieldCleared(backupjob.FieldStartedAt) { - fields = append(fields, backupjob.FieldStartedAt) - } - if m.FieldCleared(backupjob.FieldFinishedAt) { - fields = append(fields, backupjob.FieldFinishedAt) - } - if m.FieldCleared(backupjob.FieldErrorMessage) { - fields = append(fields, backupjob.FieldErrorMessage) - } - if m.FieldCleared(backupjob.FieldArtifactLocalPath) { - fields = append(fields, backupjob.FieldArtifactLocalPath) - } - if m.FieldCleared(backupjob.FieldArtifactSizeBytes) { - fields = append(fields, backupjob.FieldArtifactSizeBytes) - } - if m.FieldCleared(backupjob.FieldArtifactSha256) { - fields = append(fields, backupjob.FieldArtifactSha256) - } - if m.FieldCleared(backupjob.FieldS3Bucket) { - fields = append(fields, backupjob.FieldS3Bucket) - } - if m.FieldCleared(backupjob.FieldS3Key) { - fields = append(fields, backupjob.FieldS3Key) - } - if m.FieldCleared(backupjob.FieldS3Etag) { - fields = append(fields, backupjob.FieldS3Etag) - } - return fields -} - -// FieldCleared returns a boolean indicating if a field with the given name was -// cleared in this mutation. -func (m *BackupJobMutation) FieldCleared(name string) bool { - _, ok := m.clearedFields[name] - return ok -} - -// ClearField clears the value of the field with the given name. It returns an -// error if the field is not defined in the schema. 
-func (m *BackupJobMutation) ClearField(name string) error { - switch name { - case backupjob.FieldIdempotencyKey: - m.ClearIdempotencyKey() - return nil - case backupjob.FieldS3ProfileID: - m.ClearS3ProfileID() - return nil - case backupjob.FieldPostgresProfileID: - m.ClearPostgresProfileID() - return nil - case backupjob.FieldRedisProfileID: - m.ClearRedisProfileID() - return nil - case backupjob.FieldStartedAt: - m.ClearStartedAt() - return nil - case backupjob.FieldFinishedAt: - m.ClearFinishedAt() - return nil - case backupjob.FieldErrorMessage: - m.ClearErrorMessage() - return nil - case backupjob.FieldArtifactLocalPath: - m.ClearArtifactLocalPath() - return nil - case backupjob.FieldArtifactSizeBytes: - m.ClearArtifactSizeBytes() - return nil - case backupjob.FieldArtifactSha256: - m.ClearArtifactSha256() - return nil - case backupjob.FieldS3Bucket: - m.ClearS3Bucket() - return nil - case backupjob.FieldS3Key: - m.ClearS3Key() - return nil - case backupjob.FieldS3Etag: - m.ClearS3Etag() - return nil - } - return fmt.Errorf("unknown BackupJob nullable field %s", name) -} - -// ResetField resets all changes in the mutation for the field with the given name. -// It returns an error if the field is not defined in the schema. 
-func (m *BackupJobMutation) ResetField(name string) error { - switch name { - case backupjob.FieldJobID: - m.ResetJobID() - return nil - case backupjob.FieldBackupType: - m.ResetBackupType() - return nil - case backupjob.FieldStatus: - m.ResetStatus() - return nil - case backupjob.FieldTriggeredBy: - m.ResetTriggeredBy() - return nil - case backupjob.FieldIdempotencyKey: - m.ResetIdempotencyKey() - return nil - case backupjob.FieldUploadToS3: - m.ResetUploadToS3() - return nil - case backupjob.FieldS3ProfileID: - m.ResetS3ProfileID() - return nil - case backupjob.FieldPostgresProfileID: - m.ResetPostgresProfileID() - return nil - case backupjob.FieldRedisProfileID: - m.ResetRedisProfileID() - return nil - case backupjob.FieldStartedAt: - m.ResetStartedAt() - return nil - case backupjob.FieldFinishedAt: - m.ResetFinishedAt() - return nil - case backupjob.FieldErrorMessage: - m.ResetErrorMessage() - return nil - case backupjob.FieldArtifactLocalPath: - m.ResetArtifactLocalPath() - return nil - case backupjob.FieldArtifactSizeBytes: - m.ResetArtifactSizeBytes() - return nil - case backupjob.FieldArtifactSha256: - m.ResetArtifactSha256() - return nil - case backupjob.FieldS3Bucket: - m.ResetS3Bucket() - return nil - case backupjob.FieldS3Key: - m.ResetS3Key() - return nil - case backupjob.FieldS3Etag: - m.ResetS3Etag() - return nil - case backupjob.FieldCreatedAt: - m.ResetCreatedAt() - return nil - case backupjob.FieldUpdatedAt: - m.ResetUpdatedAt() - return nil - } - return fmt.Errorf("unknown BackupJob field %s", name) -} - -// AddedEdges returns all edge names that were set/added in this mutation. -func (m *BackupJobMutation) AddedEdges() []string { - edges := make([]string, 0, 1) - if m.events != nil { - edges = append(edges, backupjob.EdgeEvents) - } - return edges -} - -// AddedIDs returns all IDs (to other nodes) that were added for the given edge -// name in this mutation. 
-func (m *BackupJobMutation) AddedIDs(name string) []ent.Value { - switch name { - case backupjob.EdgeEvents: - ids := make([]ent.Value, 0, len(m.events)) - for id := range m.events { - ids = append(ids, id) - } - return ids - } - return nil -} - -// RemovedEdges returns all edge names that were removed in this mutation. -func (m *BackupJobMutation) RemovedEdges() []string { - edges := make([]string, 0, 1) - if m.removedevents != nil { - edges = append(edges, backupjob.EdgeEvents) - } - return edges -} - -// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with -// the given name in this mutation. -func (m *BackupJobMutation) RemovedIDs(name string) []ent.Value { - switch name { - case backupjob.EdgeEvents: - ids := make([]ent.Value, 0, len(m.removedevents)) - for id := range m.removedevents { - ids = append(ids, id) - } - return ids - } - return nil -} - -// ClearedEdges returns all edge names that were cleared in this mutation. -func (m *BackupJobMutation) ClearedEdges() []string { - edges := make([]string, 0, 1) - if m.clearedevents { - edges = append(edges, backupjob.EdgeEvents) - } - return edges -} - -// EdgeCleared returns a boolean which indicates if the edge with the given name -// was cleared in this mutation. -func (m *BackupJobMutation) EdgeCleared(name string) bool { - switch name { - case backupjob.EdgeEvents: - return m.clearedevents - } - return false -} - -// ClearEdge clears the value of the edge with the given name. It returns an error -// if that edge is not defined in the schema. -func (m *BackupJobMutation) ClearEdge(name string) error { - switch name { - } - return fmt.Errorf("unknown BackupJob unique edge %s", name) -} - -// ResetEdge resets all changes to the edge with the given name in this mutation. -// It returns an error if the edge is not defined in the schema. 
-func (m *BackupJobMutation) ResetEdge(name string) error { - switch name { - case backupjob.EdgeEvents: - m.ResetEvents() - return nil - } - return fmt.Errorf("unknown BackupJob edge %s", name) -} - -// BackupJobEventMutation represents an operation that mutates the BackupJobEvent nodes in the graph. -type BackupJobEventMutation struct { - config - op Op - typ string - id *int - level *backupjobevent.Level - event_type *string - message *string - payload *string - event_time *time.Time - created_at *time.Time - clearedFields map[string]struct{} - job *int - clearedjob bool - done bool - oldValue func(context.Context) (*BackupJobEvent, error) - predicates []predicate.BackupJobEvent -} - -var _ ent.Mutation = (*BackupJobEventMutation)(nil) - -// backupjobeventOption allows management of the mutation configuration using functional options. -type backupjobeventOption func(*BackupJobEventMutation) - -// newBackupJobEventMutation creates new mutation for the BackupJobEvent entity. -func newBackupJobEventMutation(c config, op Op, opts ...backupjobeventOption) *BackupJobEventMutation { - m := &BackupJobEventMutation{ - config: c, - op: op, - typ: TypeBackupJobEvent, - clearedFields: make(map[string]struct{}), - } - for _, opt := range opts { - opt(m) - } - return m -} - -// withBackupJobEventID sets the ID field of the mutation. -func withBackupJobEventID(id int) backupjobeventOption { - return func(m *BackupJobEventMutation) { - var ( - err error - once sync.Once - value *BackupJobEvent - ) - m.oldValue = func(ctx context.Context) (*BackupJobEvent, error) { - once.Do(func() { - if m.done { - err = errors.New("querying old values post mutation is not allowed") - } else { - value, err = m.Client().BackupJobEvent.Get(ctx, id) - } - }) - return value, err - } - m.id = &id - } -} - -// withBackupJobEvent sets the old BackupJobEvent of the mutation. 
-func withBackupJobEvent(node *BackupJobEvent) backupjobeventOption { - return func(m *BackupJobEventMutation) { - m.oldValue = func(context.Context) (*BackupJobEvent, error) { - return node, nil - } - m.id = &node.ID - } -} - -// Client returns a new `ent.Client` from the mutation. If the mutation was -// executed in a transaction (ent.Tx), a transactional client is returned. -func (m BackupJobEventMutation) Client() *Client { - client := &Client{config: m.config} - client.init() - return client -} - -// Tx returns an `ent.Tx` for mutations that were executed in transactions; -// it returns an error otherwise. -func (m BackupJobEventMutation) Tx() (*Tx, error) { - if _, ok := m.driver.(*txDriver); !ok { - return nil, errors.New("ent: mutation is not running in a transaction") - } - tx := &Tx{config: m.config} - tx.init() - return tx, nil -} - -// ID returns the ID value in the mutation. Note that the ID is only available -// if it was provided to the builder or after it was returned from the database. -func (m *BackupJobEventMutation) ID() (id int, exists bool) { - if m.id == nil { - return - } - return *m.id, true -} - -// IDs queries the database and returns the entity ids that match the mutation's predicate. -// That means, if the mutation is applied within a transaction with an isolation level such -// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated -// or updated by the mutation. -func (m *BackupJobEventMutation) IDs(ctx context.Context) ([]int, error) { - switch { - case m.op.Is(OpUpdateOne | OpDeleteOne): - id, exists := m.ID() - if exists { - return []int{id}, nil - } - fallthrough - case m.op.Is(OpUpdate | OpDelete): - return m.Client().BackupJobEvent.Query().Where(m.predicates...).IDs(ctx) - default: - return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) - } -} - -// SetBackupJobID sets the "backup_job_id" field. 
-func (m *BackupJobEventMutation) SetBackupJobID(i int) { - m.job = &i -} - -// BackupJobID returns the value of the "backup_job_id" field in the mutation. -func (m *BackupJobEventMutation) BackupJobID() (r int, exists bool) { - v := m.job - if v == nil { - return - } - return *v, true -} - -// OldBackupJobID returns the old "backup_job_id" field's value of the BackupJobEvent entity. -// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobEventMutation) OldBackupJobID(ctx context.Context) (v int, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldBackupJobID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldBackupJobID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldBackupJobID: %w", err) - } - return oldValue.BackupJobID, nil -} - -// ResetBackupJobID resets all changes to the "backup_job_id" field. -func (m *BackupJobEventMutation) ResetBackupJobID() { - m.job = nil -} - -// SetLevel sets the "level" field. -func (m *BackupJobEventMutation) SetLevel(b backupjobevent.Level) { - m.level = &b -} - -// Level returns the value of the "level" field in the mutation. -func (m *BackupJobEventMutation) Level() (r backupjobevent.Level, exists bool) { - v := m.level - if v == nil { - return - } - return *v, true -} - -// OldLevel returns the old "level" field's value of the BackupJobEvent entity. -// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobEventMutation) OldLevel(ctx context.Context) (v backupjobevent.Level, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldLevel is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldLevel requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldLevel: %w", err) - } - return oldValue.Level, nil -} - -// ResetLevel resets all changes to the "level" field. -func (m *BackupJobEventMutation) ResetLevel() { - m.level = nil -} - -// SetEventType sets the "event_type" field. -func (m *BackupJobEventMutation) SetEventType(s string) { - m.event_type = &s -} - -// EventType returns the value of the "event_type" field in the mutation. -func (m *BackupJobEventMutation) EventType() (r string, exists bool) { - v := m.event_type - if v == nil { - return - } - return *v, true -} - -// OldEventType returns the old "event_type" field's value of the BackupJobEvent entity. -// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobEventMutation) OldEventType(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldEventType is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldEventType requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldEventType: %w", err) - } - return oldValue.EventType, nil -} - -// ResetEventType resets all changes to the "event_type" field. -func (m *BackupJobEventMutation) ResetEventType() { - m.event_type = nil -} - -// SetMessage sets the "message" field. 
-func (m *BackupJobEventMutation) SetMessage(s string) { - m.message = &s -} - -// Message returns the value of the "message" field in the mutation. -func (m *BackupJobEventMutation) Message() (r string, exists bool) { - v := m.message - if v == nil { - return - } - return *v, true -} - -// OldMessage returns the old "message" field's value of the BackupJobEvent entity. -// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobEventMutation) OldMessage(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldMessage is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldMessage requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldMessage: %w", err) - } - return oldValue.Message, nil -} - -// ResetMessage resets all changes to the "message" field. -func (m *BackupJobEventMutation) ResetMessage() { - m.message = nil -} - -// SetPayload sets the "payload" field. -func (m *BackupJobEventMutation) SetPayload(s string) { - m.payload = &s -} - -// Payload returns the value of the "payload" field in the mutation. -func (m *BackupJobEventMutation) Payload() (r string, exists bool) { - v := m.payload - if v == nil { - return - } - return *v, true -} - -// OldPayload returns the old "payload" field's value of the BackupJobEvent entity. -// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobEventMutation) OldPayload(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPayload is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPayload requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldPayload: %w", err) - } - return oldValue.Payload, nil -} - -// ClearPayload clears the value of the "payload" field. -func (m *BackupJobEventMutation) ClearPayload() { - m.payload = nil - m.clearedFields[backupjobevent.FieldPayload] = struct{}{} -} - -// PayloadCleared returns if the "payload" field was cleared in this mutation. -func (m *BackupJobEventMutation) PayloadCleared() bool { - _, ok := m.clearedFields[backupjobevent.FieldPayload] - return ok -} - -// ResetPayload resets all changes to the "payload" field. -func (m *BackupJobEventMutation) ResetPayload() { - m.payload = nil - delete(m.clearedFields, backupjobevent.FieldPayload) -} - -// SetEventTime sets the "event_time" field. -func (m *BackupJobEventMutation) SetEventTime(t time.Time) { - m.event_time = &t -} - -// EventTime returns the value of the "event_time" field in the mutation. -func (m *BackupJobEventMutation) EventTime() (r time.Time, exists bool) { - v := m.event_time - if v == nil { - return - } - return *v, true -} - -// OldEventTime returns the old "event_time" field's value of the BackupJobEvent entity. -// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupJobEventMutation) OldEventTime(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldEventTime is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldEventTime requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldEventTime: %w", err) - } - return oldValue.EventTime, nil -} - -// ResetEventTime resets all changes to the "event_time" field. -func (m *BackupJobEventMutation) ResetEventTime() { - m.event_time = nil -} - -// SetCreatedAt sets the "created_at" field. -func (m *BackupJobEventMutation) SetCreatedAt(t time.Time) { - m.created_at = &t -} - -// CreatedAt returns the value of the "created_at" field in the mutation. -func (m *BackupJobEventMutation) CreatedAt() (r time.Time, exists bool) { - v := m.created_at - if v == nil { - return - } - return *v, true -} - -// OldCreatedAt returns the old "created_at" field's value of the BackupJobEvent entity. -// If the BackupJobEvent object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupJobEventMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCreatedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) - } - return oldValue.CreatedAt, nil -} - -// ResetCreatedAt resets all changes to the "created_at" field. -func (m *BackupJobEventMutation) ResetCreatedAt() { - m.created_at = nil -} - -// SetJobID sets the "job" edge to the BackupJob entity by id. 
-func (m *BackupJobEventMutation) SetJobID(id int) { - m.job = &id -} - -// ClearJob clears the "job" edge to the BackupJob entity. -func (m *BackupJobEventMutation) ClearJob() { - m.clearedjob = true - m.clearedFields[backupjobevent.FieldBackupJobID] = struct{}{} -} - -// JobCleared reports if the "job" edge to the BackupJob entity was cleared. -func (m *BackupJobEventMutation) JobCleared() bool { - return m.clearedjob -} - -// JobID returns the "job" edge ID in the mutation. -func (m *BackupJobEventMutation) JobID() (id int, exists bool) { - if m.job != nil { - return *m.job, true - } - return -} - -// JobIDs returns the "job" edge IDs in the mutation. -// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use -// JobID instead. It exists only for internal usage by the builders. -func (m *BackupJobEventMutation) JobIDs() (ids []int) { - if id := m.job; id != nil { - ids = append(ids, *id) - } - return -} - -// ResetJob resets all changes to the "job" edge. -func (m *BackupJobEventMutation) ResetJob() { - m.job = nil - m.clearedjob = false -} - -// Where appends a list predicates to the BackupJobEventMutation builder. -func (m *BackupJobEventMutation) Where(ps ...predicate.BackupJobEvent) { - m.predicates = append(m.predicates, ps...) -} - -// WhereP appends storage-level predicates to the BackupJobEventMutation builder. Using this method, -// users can use type-assertion to append predicates that do not depend on any generated package. -func (m *BackupJobEventMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.BackupJobEvent, len(ps)) - for i := range ps { - p[i] = ps[i] - } - m.Where(p...) -} - -// Op returns the operation name. -func (m *BackupJobEventMutation) Op() Op { - return m.op -} - -// SetOp allows setting the mutation operation. -func (m *BackupJobEventMutation) SetOp(op Op) { - m.op = op -} - -// Type returns the node type of this mutation (BackupJobEvent). 
-func (m *BackupJobEventMutation) Type() string { - return m.typ -} - -// Fields returns all fields that were changed during this mutation. Note that in -// order to get all numeric fields that were incremented/decremented, call -// AddedFields(). -func (m *BackupJobEventMutation) Fields() []string { - fields := make([]string, 0, 7) - if m.job != nil { - fields = append(fields, backupjobevent.FieldBackupJobID) - } - if m.level != nil { - fields = append(fields, backupjobevent.FieldLevel) - } - if m.event_type != nil { - fields = append(fields, backupjobevent.FieldEventType) - } - if m.message != nil { - fields = append(fields, backupjobevent.FieldMessage) - } - if m.payload != nil { - fields = append(fields, backupjobevent.FieldPayload) - } - if m.event_time != nil { - fields = append(fields, backupjobevent.FieldEventTime) - } - if m.created_at != nil { - fields = append(fields, backupjobevent.FieldCreatedAt) - } - return fields -} - -// Field returns the value of a field with the given name. The second boolean -// return value indicates that this field was not set, or was not defined in the -// schema. -func (m *BackupJobEventMutation) Field(name string) (ent.Value, bool) { - switch name { - case backupjobevent.FieldBackupJobID: - return m.BackupJobID() - case backupjobevent.FieldLevel: - return m.Level() - case backupjobevent.FieldEventType: - return m.EventType() - case backupjobevent.FieldMessage: - return m.Message() - case backupjobevent.FieldPayload: - return m.Payload() - case backupjobevent.FieldEventTime: - return m.EventTime() - case backupjobevent.FieldCreatedAt: - return m.CreatedAt() - } - return nil, false -} - -// OldField returns the old value of the field from the database. An error is -// returned if the mutation operation is not UpdateOne, or the query to the -// database failed. 
-func (m *BackupJobEventMutation) OldField(ctx context.Context, name string) (ent.Value, error) { - switch name { - case backupjobevent.FieldBackupJobID: - return m.OldBackupJobID(ctx) - case backupjobevent.FieldLevel: - return m.OldLevel(ctx) - case backupjobevent.FieldEventType: - return m.OldEventType(ctx) - case backupjobevent.FieldMessage: - return m.OldMessage(ctx) - case backupjobevent.FieldPayload: - return m.OldPayload(ctx) - case backupjobevent.FieldEventTime: - return m.OldEventTime(ctx) - case backupjobevent.FieldCreatedAt: - return m.OldCreatedAt(ctx) - } - return nil, fmt.Errorf("unknown BackupJobEvent field %s", name) -} - -// SetField sets the value of a field with the given name. It returns an error if -// the field is not defined in the schema, or if the type mismatched the field -// type. -func (m *BackupJobEventMutation) SetField(name string, value ent.Value) error { - switch name { - case backupjobevent.FieldBackupJobID: - v, ok := value.(int) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetBackupJobID(v) - return nil - case backupjobevent.FieldLevel: - v, ok := value.(backupjobevent.Level) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetLevel(v) - return nil - case backupjobevent.FieldEventType: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetEventType(v) - return nil - case backupjobevent.FieldMessage: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetMessage(v) - return nil - case backupjobevent.FieldPayload: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetPayload(v) - return nil - case backupjobevent.FieldEventTime: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetEventTime(v) - return nil - 
case backupjobevent.FieldCreatedAt: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetCreatedAt(v) - return nil - } - return fmt.Errorf("unknown BackupJobEvent field %s", name) -} - -// AddedFields returns all numeric fields that were incremented/decremented during -// this mutation. -func (m *BackupJobEventMutation) AddedFields() []string { - var fields []string - return fields -} - -// AddedField returns the numeric value that was incremented/decremented on a field -// with the given name. The second boolean return value indicates that this field -// was not set, or was not defined in the schema. -func (m *BackupJobEventMutation) AddedField(name string) (ent.Value, bool) { - switch name { - } - return nil, false -} - -// AddField adds the value to the field with the given name. It returns an error if -// the field is not defined in the schema, or if the type mismatched the field -// type. -func (m *BackupJobEventMutation) AddField(name string, value ent.Value) error { - switch name { - } - return fmt.Errorf("unknown BackupJobEvent numeric field %s", name) -} - -// ClearedFields returns all nullable fields that were cleared during this -// mutation. -func (m *BackupJobEventMutation) ClearedFields() []string { - var fields []string - if m.FieldCleared(backupjobevent.FieldPayload) { - fields = append(fields, backupjobevent.FieldPayload) - } - return fields -} - -// FieldCleared returns a boolean indicating if a field with the given name was -// cleared in this mutation. -func (m *BackupJobEventMutation) FieldCleared(name string) bool { - _, ok := m.clearedFields[name] - return ok -} - -// ClearField clears the value of the field with the given name. It returns an -// error if the field is not defined in the schema. 
-func (m *BackupJobEventMutation) ClearField(name string) error { - switch name { - case backupjobevent.FieldPayload: - m.ClearPayload() - return nil - } - return fmt.Errorf("unknown BackupJobEvent nullable field %s", name) -} - -// ResetField resets all changes in the mutation for the field with the given name. -// It returns an error if the field is not defined in the schema. -func (m *BackupJobEventMutation) ResetField(name string) error { - switch name { - case backupjobevent.FieldBackupJobID: - m.ResetBackupJobID() - return nil - case backupjobevent.FieldLevel: - m.ResetLevel() - return nil - case backupjobevent.FieldEventType: - m.ResetEventType() - return nil - case backupjobevent.FieldMessage: - m.ResetMessage() - return nil - case backupjobevent.FieldPayload: - m.ResetPayload() - return nil - case backupjobevent.FieldEventTime: - m.ResetEventTime() - return nil - case backupjobevent.FieldCreatedAt: - m.ResetCreatedAt() - return nil - } - return fmt.Errorf("unknown BackupJobEvent field %s", name) -} - -// AddedEdges returns all edge names that were set/added in this mutation. -func (m *BackupJobEventMutation) AddedEdges() []string { - edges := make([]string, 0, 1) - if m.job != nil { - edges = append(edges, backupjobevent.EdgeJob) - } - return edges -} - -// AddedIDs returns all IDs (to other nodes) that were added for the given edge -// name in this mutation. -func (m *BackupJobEventMutation) AddedIDs(name string) []ent.Value { - switch name { - case backupjobevent.EdgeJob: - if id := m.job; id != nil { - return []ent.Value{*id} - } - } - return nil -} - -// RemovedEdges returns all edge names that were removed in this mutation. -func (m *BackupJobEventMutation) RemovedEdges() []string { - edges := make([]string, 0, 1) - return edges -} - -// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with -// the given name in this mutation. 
-func (m *BackupJobEventMutation) RemovedIDs(name string) []ent.Value { - return nil -} - -// ClearedEdges returns all edge names that were cleared in this mutation. -func (m *BackupJobEventMutation) ClearedEdges() []string { - edges := make([]string, 0, 1) - if m.clearedjob { - edges = append(edges, backupjobevent.EdgeJob) - } - return edges -} - -// EdgeCleared returns a boolean which indicates if the edge with the given name -// was cleared in this mutation. -func (m *BackupJobEventMutation) EdgeCleared(name string) bool { - switch name { - case backupjobevent.EdgeJob: - return m.clearedjob - } - return false -} - -// ClearEdge clears the value of the edge with the given name. It returns an error -// if that edge is not defined in the schema. -func (m *BackupJobEventMutation) ClearEdge(name string) error { - switch name { - case backupjobevent.EdgeJob: - m.ClearJob() - return nil - } - return fmt.Errorf("unknown BackupJobEvent unique edge %s", name) -} - -// ResetEdge resets all changes to the edge with the given name in this mutation. -// It returns an error if the edge is not defined in the schema. -func (m *BackupJobEventMutation) ResetEdge(name string) error { - switch name { - case backupjobevent.EdgeJob: - m.ResetJob() - return nil - } - return fmt.Errorf("unknown BackupJobEvent edge %s", name) -} - -// BackupS3ConfigMutation represents an operation that mutates the BackupS3Config nodes in the graph. 
-type BackupS3ConfigMutation struct { - config - op Op - typ string - id *int - profile_id *string - name *string - is_active *bool - enabled *bool - endpoint *string - region *string - bucket *string - access_key_id *string - secret_access_key_encrypted *string - prefix *string - force_path_style *bool - use_ssl *bool - created_at *time.Time - updated_at *time.Time - clearedFields map[string]struct{} - done bool - oldValue func(context.Context) (*BackupS3Config, error) - predicates []predicate.BackupS3Config -} - -var _ ent.Mutation = (*BackupS3ConfigMutation)(nil) - -// backups3configOption allows management of the mutation configuration using functional options. -type backups3configOption func(*BackupS3ConfigMutation) - -// newBackupS3ConfigMutation creates new mutation for the BackupS3Config entity. -func newBackupS3ConfigMutation(c config, op Op, opts ...backups3configOption) *BackupS3ConfigMutation { - m := &BackupS3ConfigMutation{ - config: c, - op: op, - typ: TypeBackupS3Config, - clearedFields: make(map[string]struct{}), - } - for _, opt := range opts { - opt(m) - } - return m -} - -// withBackupS3ConfigID sets the ID field of the mutation. -func withBackupS3ConfigID(id int) backups3configOption { - return func(m *BackupS3ConfigMutation) { - var ( - err error - once sync.Once - value *BackupS3Config - ) - m.oldValue = func(ctx context.Context) (*BackupS3Config, error) { - once.Do(func() { - if m.done { - err = errors.New("querying old values post mutation is not allowed") - } else { - value, err = m.Client().BackupS3Config.Get(ctx, id) - } - }) - return value, err - } - m.id = &id - } -} - -// withBackupS3Config sets the old BackupS3Config of the mutation. -func withBackupS3Config(node *BackupS3Config) backups3configOption { - return func(m *BackupS3ConfigMutation) { - m.oldValue = func(context.Context) (*BackupS3Config, error) { - return node, nil - } - m.id = &node.ID - } -} - -// Client returns a new `ent.Client` from the mutation. 
If the mutation was -// executed in a transaction (ent.Tx), a transactional client is returned. -func (m BackupS3ConfigMutation) Client() *Client { - client := &Client{config: m.config} - client.init() - return client -} - -// Tx returns an `ent.Tx` for mutations that were executed in transactions; -// it returns an error otherwise. -func (m BackupS3ConfigMutation) Tx() (*Tx, error) { - if _, ok := m.driver.(*txDriver); !ok { - return nil, errors.New("ent: mutation is not running in a transaction") - } - tx := &Tx{config: m.config} - tx.init() - return tx, nil -} - -// ID returns the ID value in the mutation. Note that the ID is only available -// if it was provided to the builder or after it was returned from the database. -func (m *BackupS3ConfigMutation) ID() (id int, exists bool) { - if m.id == nil { - return - } - return *m.id, true -} - -// IDs queries the database and returns the entity ids that match the mutation's predicate. -// That means, if the mutation is applied within a transaction with an isolation level such -// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated -// or updated by the mutation. -func (m *BackupS3ConfigMutation) IDs(ctx context.Context) ([]int, error) { - switch { - case m.op.Is(OpUpdateOne | OpDeleteOne): - id, exists := m.ID() - if exists { - return []int{id}, nil - } - fallthrough - case m.op.Is(OpUpdate | OpDelete): - return m.Client().BackupS3Config.Query().Where(m.predicates...).IDs(ctx) - default: - return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) - } -} - -// SetProfileID sets the "profile_id" field. -func (m *BackupS3ConfigMutation) SetProfileID(s string) { - m.profile_id = &s -} - -// ProfileID returns the value of the "profile_id" field in the mutation. 
-func (m *BackupS3ConfigMutation) ProfileID() (r string, exists bool) { - v := m.profile_id - if v == nil { - return - } - return *v, true -} - -// OldProfileID returns the old "profile_id" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupS3ConfigMutation) OldProfileID(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldProfileID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldProfileID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldProfileID: %w", err) - } - return oldValue.ProfileID, nil -} - -// ResetProfileID resets all changes to the "profile_id" field. -func (m *BackupS3ConfigMutation) ResetProfileID() { - m.profile_id = nil -} - -// SetName sets the "name" field. -func (m *BackupS3ConfigMutation) SetName(s string) { - m.name = &s -} - -// Name returns the value of the "name" field in the mutation. -func (m *BackupS3ConfigMutation) Name() (r string, exists bool) { - v := m.name - if v == nil { - return - } - return *v, true -} - -// OldName returns the old "name" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupS3ConfigMutation) OldName(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldName is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldName requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldName: %w", err) - } - return oldValue.Name, nil -} - -// ResetName resets all changes to the "name" field. -func (m *BackupS3ConfigMutation) ResetName() { - m.name = nil -} - -// SetIsActive sets the "is_active" field. -func (m *BackupS3ConfigMutation) SetIsActive(b bool) { - m.is_active = &b -} - -// IsActive returns the value of the "is_active" field in the mutation. -func (m *BackupS3ConfigMutation) IsActive() (r bool, exists bool) { - v := m.is_active - if v == nil { - return - } - return *v, true -} - -// OldIsActive returns the old "is_active" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupS3ConfigMutation) OldIsActive(ctx context.Context) (v bool, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldIsActive is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldIsActive requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldIsActive: %w", err) - } - return oldValue.IsActive, nil -} - -// ResetIsActive resets all changes to the "is_active" field. -func (m *BackupS3ConfigMutation) ResetIsActive() { - m.is_active = nil -} - -// SetEnabled sets the "enabled" field. 
-func (m *BackupS3ConfigMutation) SetEnabled(b bool) { - m.enabled = &b -} - -// Enabled returns the value of the "enabled" field in the mutation. -func (m *BackupS3ConfigMutation) Enabled() (r bool, exists bool) { - v := m.enabled - if v == nil { - return - } - return *v, true -} - -// OldEnabled returns the old "enabled" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupS3ConfigMutation) OldEnabled(ctx context.Context) (v bool, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldEnabled is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldEnabled requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldEnabled: %w", err) - } - return oldValue.Enabled, nil -} - -// ResetEnabled resets all changes to the "enabled" field. -func (m *BackupS3ConfigMutation) ResetEnabled() { - m.enabled = nil -} - -// SetEndpoint sets the "endpoint" field. -func (m *BackupS3ConfigMutation) SetEndpoint(s string) { - m.endpoint = &s -} - -// Endpoint returns the value of the "endpoint" field in the mutation. -func (m *BackupS3ConfigMutation) Endpoint() (r string, exists bool) { - v := m.endpoint - if v == nil { - return - } - return *v, true -} - -// OldEndpoint returns the old "endpoint" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupS3ConfigMutation) OldEndpoint(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldEndpoint is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldEndpoint requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldEndpoint: %w", err) - } - return oldValue.Endpoint, nil -} - -// ResetEndpoint resets all changes to the "endpoint" field. -func (m *BackupS3ConfigMutation) ResetEndpoint() { - m.endpoint = nil -} - -// SetRegion sets the "region" field. -func (m *BackupS3ConfigMutation) SetRegion(s string) { - m.region = &s -} - -// Region returns the value of the "region" field in the mutation. -func (m *BackupS3ConfigMutation) Region() (r string, exists bool) { - v := m.region - if v == nil { - return - } - return *v, true -} - -// OldRegion returns the old "region" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupS3ConfigMutation) OldRegion(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldRegion is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldRegion requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldRegion: %w", err) - } - return oldValue.Region, nil -} - -// ResetRegion resets all changes to the "region" field. -func (m *BackupS3ConfigMutation) ResetRegion() { - m.region = nil -} - -// SetBucket sets the "bucket" field. 
-func (m *BackupS3ConfigMutation) SetBucket(s string) { - m.bucket = &s -} - -// Bucket returns the value of the "bucket" field in the mutation. -func (m *BackupS3ConfigMutation) Bucket() (r string, exists bool) { - v := m.bucket - if v == nil { - return - } - return *v, true -} - -// OldBucket returns the old "bucket" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupS3ConfigMutation) OldBucket(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldBucket is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldBucket requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldBucket: %w", err) - } - return oldValue.Bucket, nil -} - -// ResetBucket resets all changes to the "bucket" field. -func (m *BackupS3ConfigMutation) ResetBucket() { - m.bucket = nil -} - -// SetAccessKeyID sets the "access_key_id" field. -func (m *BackupS3ConfigMutation) SetAccessKeyID(s string) { - m.access_key_id = &s -} - -// AccessKeyID returns the value of the "access_key_id" field in the mutation. -func (m *BackupS3ConfigMutation) AccessKeyID() (r string, exists bool) { - v := m.access_key_id - if v == nil { - return - } - return *v, true -} - -// OldAccessKeyID returns the old "access_key_id" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupS3ConfigMutation) OldAccessKeyID(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldAccessKeyID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldAccessKeyID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldAccessKeyID: %w", err) - } - return oldValue.AccessKeyID, nil -} - -// ResetAccessKeyID resets all changes to the "access_key_id" field. -func (m *BackupS3ConfigMutation) ResetAccessKeyID() { - m.access_key_id = nil -} - -// SetSecretAccessKeyEncrypted sets the "secret_access_key_encrypted" field. -func (m *BackupS3ConfigMutation) SetSecretAccessKeyEncrypted(s string) { - m.secret_access_key_encrypted = &s -} - -// SecretAccessKeyEncrypted returns the value of the "secret_access_key_encrypted" field in the mutation. -func (m *BackupS3ConfigMutation) SecretAccessKeyEncrypted() (r string, exists bool) { - v := m.secret_access_key_encrypted - if v == nil { - return - } - return *v, true -} - -// OldSecretAccessKeyEncrypted returns the old "secret_access_key_encrypted" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupS3ConfigMutation) OldSecretAccessKeyEncrypted(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldSecretAccessKeyEncrypted is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldSecretAccessKeyEncrypted requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldSecretAccessKeyEncrypted: %w", err) - } - return oldValue.SecretAccessKeyEncrypted, nil -} - -// ClearSecretAccessKeyEncrypted clears the value of the "secret_access_key_encrypted" field. -func (m *BackupS3ConfigMutation) ClearSecretAccessKeyEncrypted() { - m.secret_access_key_encrypted = nil - m.clearedFields[backups3config.FieldSecretAccessKeyEncrypted] = struct{}{} -} - -// SecretAccessKeyEncryptedCleared returns if the "secret_access_key_encrypted" field was cleared in this mutation. -func (m *BackupS3ConfigMutation) SecretAccessKeyEncryptedCleared() bool { - _, ok := m.clearedFields[backups3config.FieldSecretAccessKeyEncrypted] - return ok -} - -// ResetSecretAccessKeyEncrypted resets all changes to the "secret_access_key_encrypted" field. -func (m *BackupS3ConfigMutation) ResetSecretAccessKeyEncrypted() { - m.secret_access_key_encrypted = nil - delete(m.clearedFields, backups3config.FieldSecretAccessKeyEncrypted) -} - -// SetPrefix sets the "prefix" field. -func (m *BackupS3ConfigMutation) SetPrefix(s string) { - m.prefix = &s -} - -// Prefix returns the value of the "prefix" field in the mutation. -func (m *BackupS3ConfigMutation) Prefix() (r string, exists bool) { - v := m.prefix - if v == nil { - return - } - return *v, true -} - -// OldPrefix returns the old "prefix" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. 
-// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupS3ConfigMutation) OldPrefix(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPrefix is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPrefix requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldPrefix: %w", err) - } - return oldValue.Prefix, nil -} - -// ResetPrefix resets all changes to the "prefix" field. -func (m *BackupS3ConfigMutation) ResetPrefix() { - m.prefix = nil -} - -// SetForcePathStyle sets the "force_path_style" field. -func (m *BackupS3ConfigMutation) SetForcePathStyle(b bool) { - m.force_path_style = &b -} - -// ForcePathStyle returns the value of the "force_path_style" field in the mutation. -func (m *BackupS3ConfigMutation) ForcePathStyle() (r bool, exists bool) { - v := m.force_path_style - if v == nil { - return - } - return *v, true -} - -// OldForcePathStyle returns the old "force_path_style" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupS3ConfigMutation) OldForcePathStyle(ctx context.Context) (v bool, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldForcePathStyle is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldForcePathStyle requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldForcePathStyle: %w", err) - } - return oldValue.ForcePathStyle, nil -} - -// ResetForcePathStyle resets all changes to the "force_path_style" field. 
-func (m *BackupS3ConfigMutation) ResetForcePathStyle() { - m.force_path_style = nil -} - -// SetUseSsl sets the "use_ssl" field. -func (m *BackupS3ConfigMutation) SetUseSsl(b bool) { - m.use_ssl = &b -} - -// UseSsl returns the value of the "use_ssl" field in the mutation. -func (m *BackupS3ConfigMutation) UseSsl() (r bool, exists bool) { - v := m.use_ssl - if v == nil { - return - } - return *v, true -} - -// OldUseSsl returns the old "use_ssl" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupS3ConfigMutation) OldUseSsl(ctx context.Context) (v bool, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUseSsl is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUseSsl requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldUseSsl: %w", err) - } - return oldValue.UseSsl, nil -} - -// ResetUseSsl resets all changes to the "use_ssl" field. -func (m *BackupS3ConfigMutation) ResetUseSsl() { - m.use_ssl = nil -} - -// SetCreatedAt sets the "created_at" field. -func (m *BackupS3ConfigMutation) SetCreatedAt(t time.Time) { - m.created_at = &t -} - -// CreatedAt returns the value of the "created_at" field in the mutation. -func (m *BackupS3ConfigMutation) CreatedAt() (r time.Time, exists bool) { - v := m.created_at - if v == nil { - return - } - return *v, true -} - -// OldCreatedAt returns the old "created_at" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupS3ConfigMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCreatedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) - } - return oldValue.CreatedAt, nil -} - -// ResetCreatedAt resets all changes to the "created_at" field. -func (m *BackupS3ConfigMutation) ResetCreatedAt() { - m.created_at = nil -} - -// SetUpdatedAt sets the "updated_at" field. -func (m *BackupS3ConfigMutation) SetUpdatedAt(t time.Time) { - m.updated_at = &t -} - -// UpdatedAt returns the value of the "updated_at" field in the mutation. -func (m *BackupS3ConfigMutation) UpdatedAt() (r time.Time, exists bool) { - v := m.updated_at - if v == nil { - return - } - return *v, true -} - -// OldUpdatedAt returns the old "updated_at" field's value of the BackupS3Config entity. -// If the BackupS3Config object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupS3ConfigMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUpdatedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) - } - return oldValue.UpdatedAt, nil -} - -// ResetUpdatedAt resets all changes to the "updated_at" field. -func (m *BackupS3ConfigMutation) ResetUpdatedAt() { - m.updated_at = nil -} - -// Where appends a list predicates to the BackupS3ConfigMutation builder. 
-func (m *BackupS3ConfigMutation) Where(ps ...predicate.BackupS3Config) { - m.predicates = append(m.predicates, ps...) -} - -// WhereP appends storage-level predicates to the BackupS3ConfigMutation builder. Using this method, -// users can use type-assertion to append predicates that do not depend on any generated package. -func (m *BackupS3ConfigMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.BackupS3Config, len(ps)) - for i := range ps { - p[i] = ps[i] - } - m.Where(p...) -} - -// Op returns the operation name. -func (m *BackupS3ConfigMutation) Op() Op { - return m.op -} - -// SetOp allows setting the mutation operation. -func (m *BackupS3ConfigMutation) SetOp(op Op) { - m.op = op -} - -// Type returns the node type of this mutation (BackupS3Config). -func (m *BackupS3ConfigMutation) Type() string { - return m.typ -} - -// Fields returns all fields that were changed during this mutation. Note that in -// order to get all numeric fields that were incremented/decremented, call -// AddedFields(). 
-func (m *BackupS3ConfigMutation) Fields() []string { - fields := make([]string, 0, 14) - if m.profile_id != nil { - fields = append(fields, backups3config.FieldProfileID) - } - if m.name != nil { - fields = append(fields, backups3config.FieldName) - } - if m.is_active != nil { - fields = append(fields, backups3config.FieldIsActive) - } - if m.enabled != nil { - fields = append(fields, backups3config.FieldEnabled) - } - if m.endpoint != nil { - fields = append(fields, backups3config.FieldEndpoint) - } - if m.region != nil { - fields = append(fields, backups3config.FieldRegion) - } - if m.bucket != nil { - fields = append(fields, backups3config.FieldBucket) - } - if m.access_key_id != nil { - fields = append(fields, backups3config.FieldAccessKeyID) - } - if m.secret_access_key_encrypted != nil { - fields = append(fields, backups3config.FieldSecretAccessKeyEncrypted) - } - if m.prefix != nil { - fields = append(fields, backups3config.FieldPrefix) - } - if m.force_path_style != nil { - fields = append(fields, backups3config.FieldForcePathStyle) - } - if m.use_ssl != nil { - fields = append(fields, backups3config.FieldUseSsl) - } - if m.created_at != nil { - fields = append(fields, backups3config.FieldCreatedAt) - } - if m.updated_at != nil { - fields = append(fields, backups3config.FieldUpdatedAt) - } - return fields -} - -// Field returns the value of a field with the given name. The second boolean -// return value indicates that this field was not set, or was not defined in the -// schema. 
-func (m *BackupS3ConfigMutation) Field(name string) (ent.Value, bool) { - switch name { - case backups3config.FieldProfileID: - return m.ProfileID() - case backups3config.FieldName: - return m.Name() - case backups3config.FieldIsActive: - return m.IsActive() - case backups3config.FieldEnabled: - return m.Enabled() - case backups3config.FieldEndpoint: - return m.Endpoint() - case backups3config.FieldRegion: - return m.Region() - case backups3config.FieldBucket: - return m.Bucket() - case backups3config.FieldAccessKeyID: - return m.AccessKeyID() - case backups3config.FieldSecretAccessKeyEncrypted: - return m.SecretAccessKeyEncrypted() - case backups3config.FieldPrefix: - return m.Prefix() - case backups3config.FieldForcePathStyle: - return m.ForcePathStyle() - case backups3config.FieldUseSsl: - return m.UseSsl() - case backups3config.FieldCreatedAt: - return m.CreatedAt() - case backups3config.FieldUpdatedAt: - return m.UpdatedAt() - } - return nil, false -} - -// OldField returns the old value of the field from the database. An error is -// returned if the mutation operation is not UpdateOne, or the query to the -// database failed. 
-func (m *BackupS3ConfigMutation) OldField(ctx context.Context, name string) (ent.Value, error) { - switch name { - case backups3config.FieldProfileID: - return m.OldProfileID(ctx) - case backups3config.FieldName: - return m.OldName(ctx) - case backups3config.FieldIsActive: - return m.OldIsActive(ctx) - case backups3config.FieldEnabled: - return m.OldEnabled(ctx) - case backups3config.FieldEndpoint: - return m.OldEndpoint(ctx) - case backups3config.FieldRegion: - return m.OldRegion(ctx) - case backups3config.FieldBucket: - return m.OldBucket(ctx) - case backups3config.FieldAccessKeyID: - return m.OldAccessKeyID(ctx) - case backups3config.FieldSecretAccessKeyEncrypted: - return m.OldSecretAccessKeyEncrypted(ctx) - case backups3config.FieldPrefix: - return m.OldPrefix(ctx) - case backups3config.FieldForcePathStyle: - return m.OldForcePathStyle(ctx) - case backups3config.FieldUseSsl: - return m.OldUseSsl(ctx) - case backups3config.FieldCreatedAt: - return m.OldCreatedAt(ctx) - case backups3config.FieldUpdatedAt: - return m.OldUpdatedAt(ctx) - } - return nil, fmt.Errorf("unknown BackupS3Config field %s", name) -} - -// SetField sets the value of a field with the given name. It returns an error if -// the field is not defined in the schema, or if the type mismatched the field -// type. 
-func (m *BackupS3ConfigMutation) SetField(name string, value ent.Value) error { - switch name { - case backups3config.FieldProfileID: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetProfileID(v) - return nil - case backups3config.FieldName: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetName(v) - return nil - case backups3config.FieldIsActive: - v, ok := value.(bool) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetIsActive(v) - return nil - case backups3config.FieldEnabled: - v, ok := value.(bool) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetEnabled(v) - return nil - case backups3config.FieldEndpoint: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetEndpoint(v) - return nil - case backups3config.FieldRegion: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetRegion(v) - return nil - case backups3config.FieldBucket: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetBucket(v) - return nil - case backups3config.FieldAccessKeyID: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetAccessKeyID(v) - return nil - case backups3config.FieldSecretAccessKeyEncrypted: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetSecretAccessKeyEncrypted(v) - return nil - case backups3config.FieldPrefix: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetPrefix(v) - return nil - case backups3config.FieldForcePathStyle: - v, ok := value.(bool) - if !ok { - return 
fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetForcePathStyle(v) - return nil - case backups3config.FieldUseSsl: - v, ok := value.(bool) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetUseSsl(v) - return nil - case backups3config.FieldCreatedAt: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetCreatedAt(v) - return nil - case backups3config.FieldUpdatedAt: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetUpdatedAt(v) - return nil - } - return fmt.Errorf("unknown BackupS3Config field %s", name) -} - -// AddedFields returns all numeric fields that were incremented/decremented during -// this mutation. -func (m *BackupS3ConfigMutation) AddedFields() []string { - return nil -} - -// AddedField returns the numeric value that was incremented/decremented on a field -// with the given name. The second boolean return value indicates that this field -// was not set, or was not defined in the schema. -func (m *BackupS3ConfigMutation) AddedField(name string) (ent.Value, bool) { - return nil, false -} - -// AddField adds the value to the field with the given name. It returns an error if -// the field is not defined in the schema, or if the type mismatched the field -// type. -func (m *BackupS3ConfigMutation) AddField(name string, value ent.Value) error { - switch name { - } - return fmt.Errorf("unknown BackupS3Config numeric field %s", name) -} - -// ClearedFields returns all nullable fields that were cleared during this -// mutation. 
-func (m *BackupS3ConfigMutation) ClearedFields() []string { - var fields []string - if m.FieldCleared(backups3config.FieldSecretAccessKeyEncrypted) { - fields = append(fields, backups3config.FieldSecretAccessKeyEncrypted) - } - return fields -} - -// FieldCleared returns a boolean indicating if a field with the given name was -// cleared in this mutation. -func (m *BackupS3ConfigMutation) FieldCleared(name string) bool { - _, ok := m.clearedFields[name] - return ok -} - -// ClearField clears the value of the field with the given name. It returns an -// error if the field is not defined in the schema. -func (m *BackupS3ConfigMutation) ClearField(name string) error { - switch name { - case backups3config.FieldSecretAccessKeyEncrypted: - m.ClearSecretAccessKeyEncrypted() - return nil - } - return fmt.Errorf("unknown BackupS3Config nullable field %s", name) -} - -// ResetField resets all changes in the mutation for the field with the given name. -// It returns an error if the field is not defined in the schema. 
-func (m *BackupS3ConfigMutation) ResetField(name string) error { - switch name { - case backups3config.FieldProfileID: - m.ResetProfileID() - return nil - case backups3config.FieldName: - m.ResetName() - return nil - case backups3config.FieldIsActive: - m.ResetIsActive() - return nil - case backups3config.FieldEnabled: - m.ResetEnabled() - return nil - case backups3config.FieldEndpoint: - m.ResetEndpoint() - return nil - case backups3config.FieldRegion: - m.ResetRegion() - return nil - case backups3config.FieldBucket: - m.ResetBucket() - return nil - case backups3config.FieldAccessKeyID: - m.ResetAccessKeyID() - return nil - case backups3config.FieldSecretAccessKeyEncrypted: - m.ResetSecretAccessKeyEncrypted() - return nil - case backups3config.FieldPrefix: - m.ResetPrefix() - return nil - case backups3config.FieldForcePathStyle: - m.ResetForcePathStyle() - return nil - case backups3config.FieldUseSsl: - m.ResetUseSsl() - return nil - case backups3config.FieldCreatedAt: - m.ResetCreatedAt() - return nil - case backups3config.FieldUpdatedAt: - m.ResetUpdatedAt() - return nil - } - return fmt.Errorf("unknown BackupS3Config field %s", name) -} - -// AddedEdges returns all edge names that were set/added in this mutation. -func (m *BackupS3ConfigMutation) AddedEdges() []string { - edges := make([]string, 0, 0) - return edges -} - -// AddedIDs returns all IDs (to other nodes) that were added for the given edge -// name in this mutation. -func (m *BackupS3ConfigMutation) AddedIDs(name string) []ent.Value { - return nil -} - -// RemovedEdges returns all edge names that were removed in this mutation. -func (m *BackupS3ConfigMutation) RemovedEdges() []string { - edges := make([]string, 0, 0) - return edges -} - -// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with -// the given name in this mutation. 
-func (m *BackupS3ConfigMutation) RemovedIDs(name string) []ent.Value { - return nil -} - -// ClearedEdges returns all edge names that were cleared in this mutation. -func (m *BackupS3ConfigMutation) ClearedEdges() []string { - edges := make([]string, 0, 0) - return edges -} - -// EdgeCleared returns a boolean which indicates if the edge with the given name -// was cleared in this mutation. -func (m *BackupS3ConfigMutation) EdgeCleared(name string) bool { - return false -} - -// ClearEdge clears the value of the edge with the given name. It returns an error -// if that edge is not defined in the schema. -func (m *BackupS3ConfigMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown BackupS3Config unique edge %s", name) -} - -// ResetEdge resets all changes to the edge with the given name in this mutation. -// It returns an error if the edge is not defined in the schema. -func (m *BackupS3ConfigMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown BackupS3Config edge %s", name) -} - -// BackupSettingMutation represents an operation that mutates the BackupSetting nodes in the graph. -type BackupSettingMutation struct { - config - op Op - typ string - id *int - source_mode *backupsetting.SourceMode - backup_root *string - retention_days *int - addretention_days *int - keep_last *int - addkeep_last *int - sqlite_path *string - created_at *time.Time - updated_at *time.Time - clearedFields map[string]struct{} - done bool - oldValue func(context.Context) (*BackupSetting, error) - predicates []predicate.BackupSetting -} - -var _ ent.Mutation = (*BackupSettingMutation)(nil) - -// backupsettingOption allows management of the mutation configuration using functional options. -type backupsettingOption func(*BackupSettingMutation) - -// newBackupSettingMutation creates new mutation for the BackupSetting entity. 
-func newBackupSettingMutation(c config, op Op, opts ...backupsettingOption) *BackupSettingMutation { - m := &BackupSettingMutation{ - config: c, - op: op, - typ: TypeBackupSetting, - clearedFields: make(map[string]struct{}), - } - for _, opt := range opts { - opt(m) - } - return m -} - -// withBackupSettingID sets the ID field of the mutation. -func withBackupSettingID(id int) backupsettingOption { - return func(m *BackupSettingMutation) { - var ( - err error - once sync.Once - value *BackupSetting - ) - m.oldValue = func(ctx context.Context) (*BackupSetting, error) { - once.Do(func() { - if m.done { - err = errors.New("querying old values post mutation is not allowed") - } else { - value, err = m.Client().BackupSetting.Get(ctx, id) - } - }) - return value, err - } - m.id = &id - } -} - -// withBackupSetting sets the old BackupSetting of the mutation. -func withBackupSetting(node *BackupSetting) backupsettingOption { - return func(m *BackupSettingMutation) { - m.oldValue = func(context.Context) (*BackupSetting, error) { - return node, nil - } - m.id = &node.ID - } -} - -// Client returns a new `ent.Client` from the mutation. If the mutation was -// executed in a transaction (ent.Tx), a transactional client is returned. -func (m BackupSettingMutation) Client() *Client { - client := &Client{config: m.config} - client.init() - return client -} - -// Tx returns an `ent.Tx` for mutations that were executed in transactions; -// it returns an error otherwise. -func (m BackupSettingMutation) Tx() (*Tx, error) { - if _, ok := m.driver.(*txDriver); !ok { - return nil, errors.New("ent: mutation is not running in a transaction") - } - tx := &Tx{config: m.config} - tx.init() - return tx, nil -} - -// ID returns the ID value in the mutation. Note that the ID is only available -// if it was provided to the builder or after it was returned from the database. 
-func (m *BackupSettingMutation) ID() (id int, exists bool) { - if m.id == nil { - return - } - return *m.id, true -} - -// IDs queries the database and returns the entity ids that match the mutation's predicate. -// That means, if the mutation is applied within a transaction with an isolation level such -// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated -// or updated by the mutation. -func (m *BackupSettingMutation) IDs(ctx context.Context) ([]int, error) { - switch { - case m.op.Is(OpUpdateOne | OpDeleteOne): - id, exists := m.ID() - if exists { - return []int{id}, nil - } - fallthrough - case m.op.Is(OpUpdate | OpDelete): - return m.Client().BackupSetting.Query().Where(m.predicates...).IDs(ctx) - default: - return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) - } -} - -// SetSourceMode sets the "source_mode" field. -func (m *BackupSettingMutation) SetSourceMode(bm backupsetting.SourceMode) { - m.source_mode = &bm -} - -// SourceMode returns the value of the "source_mode" field in the mutation. -func (m *BackupSettingMutation) SourceMode() (r backupsetting.SourceMode, exists bool) { - v := m.source_mode - if v == nil { - return - } - return *v, true -} - -// OldSourceMode returns the old "source_mode" field's value of the BackupSetting entity. -// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupSettingMutation) OldSourceMode(ctx context.Context) (v backupsetting.SourceMode, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldSourceMode is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldSourceMode requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldSourceMode: %w", err) - } - return oldValue.SourceMode, nil -} - -// ResetSourceMode resets all changes to the "source_mode" field. -func (m *BackupSettingMutation) ResetSourceMode() { - m.source_mode = nil -} - -// SetBackupRoot sets the "backup_root" field. -func (m *BackupSettingMutation) SetBackupRoot(s string) { - m.backup_root = &s -} - -// BackupRoot returns the value of the "backup_root" field in the mutation. -func (m *BackupSettingMutation) BackupRoot() (r string, exists bool) { - v := m.backup_root - if v == nil { - return - } - return *v, true -} - -// OldBackupRoot returns the old "backup_root" field's value of the BackupSetting entity. -// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSettingMutation) OldBackupRoot(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldBackupRoot is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldBackupRoot requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldBackupRoot: %w", err) - } - return oldValue.BackupRoot, nil -} - -// ResetBackupRoot resets all changes to the "backup_root" field. 
-func (m *BackupSettingMutation) ResetBackupRoot() { - m.backup_root = nil -} - -// SetRetentionDays sets the "retention_days" field. -func (m *BackupSettingMutation) SetRetentionDays(i int) { - m.retention_days = &i - m.addretention_days = nil -} - -// RetentionDays returns the value of the "retention_days" field in the mutation. -func (m *BackupSettingMutation) RetentionDays() (r int, exists bool) { - v := m.retention_days - if v == nil { - return - } - return *v, true -} - -// OldRetentionDays returns the old "retention_days" field's value of the BackupSetting entity. -// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSettingMutation) OldRetentionDays(ctx context.Context) (v int, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldRetentionDays is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldRetentionDays requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldRetentionDays: %w", err) - } - return oldValue.RetentionDays, nil -} - -// AddRetentionDays adds i to the "retention_days" field. -func (m *BackupSettingMutation) AddRetentionDays(i int) { - if m.addretention_days != nil { - *m.addretention_days += i - } else { - m.addretention_days = &i - } -} - -// AddedRetentionDays returns the value that was added to the "retention_days" field in this mutation. -func (m *BackupSettingMutation) AddedRetentionDays() (r int, exists bool) { - v := m.addretention_days - if v == nil { - return - } - return *v, true -} - -// ResetRetentionDays resets all changes to the "retention_days" field. 
-func (m *BackupSettingMutation) ResetRetentionDays() { - m.retention_days = nil - m.addretention_days = nil -} - -// SetKeepLast sets the "keep_last" field. -func (m *BackupSettingMutation) SetKeepLast(i int) { - m.keep_last = &i - m.addkeep_last = nil -} - -// KeepLast returns the value of the "keep_last" field in the mutation. -func (m *BackupSettingMutation) KeepLast() (r int, exists bool) { - v := m.keep_last - if v == nil { - return - } - return *v, true -} - -// OldKeepLast returns the old "keep_last" field's value of the BackupSetting entity. -// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSettingMutation) OldKeepLast(ctx context.Context) (v int, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldKeepLast is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldKeepLast requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldKeepLast: %w", err) - } - return oldValue.KeepLast, nil -} - -// AddKeepLast adds i to the "keep_last" field. -func (m *BackupSettingMutation) AddKeepLast(i int) { - if m.addkeep_last != nil { - *m.addkeep_last += i - } else { - m.addkeep_last = &i - } -} - -// AddedKeepLast returns the value that was added to the "keep_last" field in this mutation. -func (m *BackupSettingMutation) AddedKeepLast() (r int, exists bool) { - v := m.addkeep_last - if v == nil { - return - } - return *v, true -} - -// ResetKeepLast resets all changes to the "keep_last" field. -func (m *BackupSettingMutation) ResetKeepLast() { - m.keep_last = nil - m.addkeep_last = nil -} - -// SetSqlitePath sets the "sqlite_path" field. 
-func (m *BackupSettingMutation) SetSqlitePath(s string) { - m.sqlite_path = &s -} - -// SqlitePath returns the value of the "sqlite_path" field in the mutation. -func (m *BackupSettingMutation) SqlitePath() (r string, exists bool) { - v := m.sqlite_path - if v == nil { - return - } - return *v, true -} - -// OldSqlitePath returns the old "sqlite_path" field's value of the BackupSetting entity. -// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSettingMutation) OldSqlitePath(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldSqlitePath is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldSqlitePath requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldSqlitePath: %w", err) - } - return oldValue.SqlitePath, nil -} - -// ResetSqlitePath resets all changes to the "sqlite_path" field. -func (m *BackupSettingMutation) ResetSqlitePath() { - m.sqlite_path = nil -} - -// SetCreatedAt sets the "created_at" field. -func (m *BackupSettingMutation) SetCreatedAt(t time.Time) { - m.created_at = &t -} - -// CreatedAt returns the value of the "created_at" field in the mutation. -func (m *BackupSettingMutation) CreatedAt() (r time.Time, exists bool) { - v := m.created_at - if v == nil { - return - } - return *v, true -} - -// OldCreatedAt returns the old "created_at" field's value of the BackupSetting entity. -// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupSettingMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCreatedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) - } - return oldValue.CreatedAt, nil -} - -// ResetCreatedAt resets all changes to the "created_at" field. -func (m *BackupSettingMutation) ResetCreatedAt() { - m.created_at = nil -} - -// SetUpdatedAt sets the "updated_at" field. -func (m *BackupSettingMutation) SetUpdatedAt(t time.Time) { - m.updated_at = &t -} - -// UpdatedAt returns the value of the "updated_at" field in the mutation. -func (m *BackupSettingMutation) UpdatedAt() (r time.Time, exists bool) { - v := m.updated_at - if v == nil { - return - } - return *v, true -} - -// OldUpdatedAt returns the old "updated_at" field's value of the BackupSetting entity. -// If the BackupSetting object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSettingMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUpdatedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) - } - return oldValue.UpdatedAt, nil -} - -// ResetUpdatedAt resets all changes to the "updated_at" field. -func (m *BackupSettingMutation) ResetUpdatedAt() { - m.updated_at = nil -} - -// Where appends a list predicates to the BackupSettingMutation builder. 
-func (m *BackupSettingMutation) Where(ps ...predicate.BackupSetting) { - m.predicates = append(m.predicates, ps...) -} - -// WhereP appends storage-level predicates to the BackupSettingMutation builder. Using this method, -// users can use type-assertion to append predicates that do not depend on any generated package. -func (m *BackupSettingMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.BackupSetting, len(ps)) - for i := range ps { - p[i] = ps[i] - } - m.Where(p...) -} - -// Op returns the operation name. -func (m *BackupSettingMutation) Op() Op { - return m.op -} - -// SetOp allows setting the mutation operation. -func (m *BackupSettingMutation) SetOp(op Op) { - m.op = op -} - -// Type returns the node type of this mutation (BackupSetting). -func (m *BackupSettingMutation) Type() string { - return m.typ -} - -// Fields returns all fields that were changed during this mutation. Note that in -// order to get all numeric fields that were incremented/decremented, call -// AddedFields(). -func (m *BackupSettingMutation) Fields() []string { - fields := make([]string, 0, 7) - if m.source_mode != nil { - fields = append(fields, backupsetting.FieldSourceMode) - } - if m.backup_root != nil { - fields = append(fields, backupsetting.FieldBackupRoot) - } - if m.retention_days != nil { - fields = append(fields, backupsetting.FieldRetentionDays) - } - if m.keep_last != nil { - fields = append(fields, backupsetting.FieldKeepLast) - } - if m.sqlite_path != nil { - fields = append(fields, backupsetting.FieldSqlitePath) - } - if m.created_at != nil { - fields = append(fields, backupsetting.FieldCreatedAt) - } - if m.updated_at != nil { - fields = append(fields, backupsetting.FieldUpdatedAt) - } - return fields -} - -// Field returns the value of a field with the given name. The second boolean -// return value indicates that this field was not set, or was not defined in the -// schema. 
-func (m *BackupSettingMutation) Field(name string) (ent.Value, bool) { - switch name { - case backupsetting.FieldSourceMode: - return m.SourceMode() - case backupsetting.FieldBackupRoot: - return m.BackupRoot() - case backupsetting.FieldRetentionDays: - return m.RetentionDays() - case backupsetting.FieldKeepLast: - return m.KeepLast() - case backupsetting.FieldSqlitePath: - return m.SqlitePath() - case backupsetting.FieldCreatedAt: - return m.CreatedAt() - case backupsetting.FieldUpdatedAt: - return m.UpdatedAt() - } - return nil, false -} - -// OldField returns the old value of the field from the database. An error is -// returned if the mutation operation is not UpdateOne, or the query to the -// database failed. -func (m *BackupSettingMutation) OldField(ctx context.Context, name string) (ent.Value, error) { - switch name { - case backupsetting.FieldSourceMode: - return m.OldSourceMode(ctx) - case backupsetting.FieldBackupRoot: - return m.OldBackupRoot(ctx) - case backupsetting.FieldRetentionDays: - return m.OldRetentionDays(ctx) - case backupsetting.FieldKeepLast: - return m.OldKeepLast(ctx) - case backupsetting.FieldSqlitePath: - return m.OldSqlitePath(ctx) - case backupsetting.FieldCreatedAt: - return m.OldCreatedAt(ctx) - case backupsetting.FieldUpdatedAt: - return m.OldUpdatedAt(ctx) - } - return nil, fmt.Errorf("unknown BackupSetting field %s", name) -} - -// SetField sets the value of a field with the given name. It returns an error if -// the field is not defined in the schema, or if the type mismatched the field -// type. 
-func (m *BackupSettingMutation) SetField(name string, value ent.Value) error { - switch name { - case backupsetting.FieldSourceMode: - v, ok := value.(backupsetting.SourceMode) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetSourceMode(v) - return nil - case backupsetting.FieldBackupRoot: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetBackupRoot(v) - return nil - case backupsetting.FieldRetentionDays: - v, ok := value.(int) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetRetentionDays(v) - return nil - case backupsetting.FieldKeepLast: - v, ok := value.(int) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetKeepLast(v) - return nil - case backupsetting.FieldSqlitePath: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetSqlitePath(v) - return nil - case backupsetting.FieldCreatedAt: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetCreatedAt(v) - return nil - case backupsetting.FieldUpdatedAt: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetUpdatedAt(v) - return nil - } - return fmt.Errorf("unknown BackupSetting field %s", name) -} - -// AddedFields returns all numeric fields that were incremented/decremented during -// this mutation. -func (m *BackupSettingMutation) AddedFields() []string { - var fields []string - if m.addretention_days != nil { - fields = append(fields, backupsetting.FieldRetentionDays) - } - if m.addkeep_last != nil { - fields = append(fields, backupsetting.FieldKeepLast) - } - return fields -} - -// AddedField returns the numeric value that was incremented/decremented on a field -// with the given name. 
The second boolean return value indicates that this field -// was not set, or was not defined in the schema. -func (m *BackupSettingMutation) AddedField(name string) (ent.Value, bool) { - switch name { - case backupsetting.FieldRetentionDays: - return m.AddedRetentionDays() - case backupsetting.FieldKeepLast: - return m.AddedKeepLast() - } - return nil, false -} - -// AddField adds the value to the field with the given name. It returns an error if -// the field is not defined in the schema, or if the type mismatched the field -// type. -func (m *BackupSettingMutation) AddField(name string, value ent.Value) error { - switch name { - case backupsetting.FieldRetentionDays: - v, ok := value.(int) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.AddRetentionDays(v) - return nil - case backupsetting.FieldKeepLast: - v, ok := value.(int) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.AddKeepLast(v) - return nil - } - return fmt.Errorf("unknown BackupSetting numeric field %s", name) -} - -// ClearedFields returns all nullable fields that were cleared during this -// mutation. -func (m *BackupSettingMutation) ClearedFields() []string { - return nil -} - -// FieldCleared returns a boolean indicating if a field with the given name was -// cleared in this mutation. -func (m *BackupSettingMutation) FieldCleared(name string) bool { - _, ok := m.clearedFields[name] - return ok -} - -// ClearField clears the value of the field with the given name. It returns an -// error if the field is not defined in the schema. -func (m *BackupSettingMutation) ClearField(name string) error { - return fmt.Errorf("unknown BackupSetting nullable field %s", name) -} - -// ResetField resets all changes in the mutation for the field with the given name. -// It returns an error if the field is not defined in the schema. 
-func (m *BackupSettingMutation) ResetField(name string) error { - switch name { - case backupsetting.FieldSourceMode: - m.ResetSourceMode() - return nil - case backupsetting.FieldBackupRoot: - m.ResetBackupRoot() - return nil - case backupsetting.FieldRetentionDays: - m.ResetRetentionDays() - return nil - case backupsetting.FieldKeepLast: - m.ResetKeepLast() - return nil - case backupsetting.FieldSqlitePath: - m.ResetSqlitePath() - return nil - case backupsetting.FieldCreatedAt: - m.ResetCreatedAt() - return nil - case backupsetting.FieldUpdatedAt: - m.ResetUpdatedAt() - return nil - } - return fmt.Errorf("unknown BackupSetting field %s", name) -} - -// AddedEdges returns all edge names that were set/added in this mutation. -func (m *BackupSettingMutation) AddedEdges() []string { - edges := make([]string, 0, 0) - return edges -} - -// AddedIDs returns all IDs (to other nodes) that were added for the given edge -// name in this mutation. -func (m *BackupSettingMutation) AddedIDs(name string) []ent.Value { - return nil -} - -// RemovedEdges returns all edge names that were removed in this mutation. -func (m *BackupSettingMutation) RemovedEdges() []string { - edges := make([]string, 0, 0) - return edges -} - -// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with -// the given name in this mutation. -func (m *BackupSettingMutation) RemovedIDs(name string) []ent.Value { - return nil -} - -// ClearedEdges returns all edge names that were cleared in this mutation. -func (m *BackupSettingMutation) ClearedEdges() []string { - edges := make([]string, 0, 0) - return edges -} - -// EdgeCleared returns a boolean which indicates if the edge with the given name -// was cleared in this mutation. -func (m *BackupSettingMutation) EdgeCleared(name string) bool { - return false -} - -// ClearEdge clears the value of the edge with the given name. It returns an error -// if that edge is not defined in the schema. 
-func (m *BackupSettingMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown BackupSetting unique edge %s", name) -} - -// ResetEdge resets all changes to the edge with the given name in this mutation. -// It returns an error if the edge is not defined in the schema. -func (m *BackupSettingMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown BackupSetting edge %s", name) -} - -// BackupSourceConfigMutation represents an operation that mutates the BackupSourceConfig nodes in the graph. -type BackupSourceConfigMutation struct { - config - op Op - typ string - id *int - source_type *backupsourceconfig.SourceType - profile_id *string - name *string - is_active *bool - host *string - port *int - addport *int - username *string - password_encrypted *string - database *string - ssl_mode *string - addr *string - redis_db *int - addredis_db *int - container_name *string - created_at *time.Time - updated_at *time.Time - clearedFields map[string]struct{} - done bool - oldValue func(context.Context) (*BackupSourceConfig, error) - predicates []predicate.BackupSourceConfig -} - -var _ ent.Mutation = (*BackupSourceConfigMutation)(nil) - -// backupsourceconfigOption allows management of the mutation configuration using functional options. -type backupsourceconfigOption func(*BackupSourceConfigMutation) - -// newBackupSourceConfigMutation creates new mutation for the BackupSourceConfig entity. -func newBackupSourceConfigMutation(c config, op Op, opts ...backupsourceconfigOption) *BackupSourceConfigMutation { - m := &BackupSourceConfigMutation{ - config: c, - op: op, - typ: TypeBackupSourceConfig, - clearedFields: make(map[string]struct{}), - } - for _, opt := range opts { - opt(m) - } - return m -} - -// withBackupSourceConfigID sets the ID field of the mutation. 
-func withBackupSourceConfigID(id int) backupsourceconfigOption { - return func(m *BackupSourceConfigMutation) { - var ( - err error - once sync.Once - value *BackupSourceConfig - ) - m.oldValue = func(ctx context.Context) (*BackupSourceConfig, error) { - once.Do(func() { - if m.done { - err = errors.New("querying old values post mutation is not allowed") - } else { - value, err = m.Client().BackupSourceConfig.Get(ctx, id) - } - }) - return value, err - } - m.id = &id - } -} - -// withBackupSourceConfig sets the old BackupSourceConfig of the mutation. -func withBackupSourceConfig(node *BackupSourceConfig) backupsourceconfigOption { - return func(m *BackupSourceConfigMutation) { - m.oldValue = func(context.Context) (*BackupSourceConfig, error) { - return node, nil - } - m.id = &node.ID - } -} - -// Client returns a new `ent.Client` from the mutation. If the mutation was -// executed in a transaction (ent.Tx), a transactional client is returned. -func (m BackupSourceConfigMutation) Client() *Client { - client := &Client{config: m.config} - client.init() - return client -} - -// Tx returns an `ent.Tx` for mutations that were executed in transactions; -// it returns an error otherwise. -func (m BackupSourceConfigMutation) Tx() (*Tx, error) { - if _, ok := m.driver.(*txDriver); !ok { - return nil, errors.New("ent: mutation is not running in a transaction") - } - tx := &Tx{config: m.config} - tx.init() - return tx, nil -} - -// ID returns the ID value in the mutation. Note that the ID is only available -// if it was provided to the builder or after it was returned from the database. -func (m *BackupSourceConfigMutation) ID() (id int, exists bool) { - if m.id == nil { - return - } - return *m.id, true -} - -// IDs queries the database and returns the entity ids that match the mutation's predicate. 
-// That means, if the mutation is applied within a transaction with an isolation level such -// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated -// or updated by the mutation. -func (m *BackupSourceConfigMutation) IDs(ctx context.Context) ([]int, error) { - switch { - case m.op.Is(OpUpdateOne | OpDeleteOne): - id, exists := m.ID() - if exists { - return []int{id}, nil - } - fallthrough - case m.op.Is(OpUpdate | OpDelete): - return m.Client().BackupSourceConfig.Query().Where(m.predicates...).IDs(ctx) - default: - return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) - } -} - -// SetSourceType sets the "source_type" field. -func (m *BackupSourceConfigMutation) SetSourceType(bt backupsourceconfig.SourceType) { - m.source_type = &bt -} - -// SourceType returns the value of the "source_type" field in the mutation. -func (m *BackupSourceConfigMutation) SourceType() (r backupsourceconfig.SourceType, exists bool) { - v := m.source_type - if v == nil { - return - } - return *v, true -} - -// OldSourceType returns the old "source_type" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldSourceType(ctx context.Context) (v backupsourceconfig.SourceType, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldSourceType is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldSourceType requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldSourceType: %w", err) - } - return oldValue.SourceType, nil -} - -// ResetSourceType resets all changes to the "source_type" field. 
-func (m *BackupSourceConfigMutation) ResetSourceType() { - m.source_type = nil -} - -// SetProfileID sets the "profile_id" field. -func (m *BackupSourceConfigMutation) SetProfileID(s string) { - m.profile_id = &s -} - -// ProfileID returns the value of the "profile_id" field in the mutation. -func (m *BackupSourceConfigMutation) ProfileID() (r string, exists bool) { - v := m.profile_id - if v == nil { - return - } - return *v, true -} - -// OldProfileID returns the old "profile_id" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldProfileID(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldProfileID is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldProfileID requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldProfileID: %w", err) - } - return oldValue.ProfileID, nil -} - -// ResetProfileID resets all changes to the "profile_id" field. -func (m *BackupSourceConfigMutation) ResetProfileID() { - m.profile_id = nil -} - -// SetName sets the "name" field. -func (m *BackupSourceConfigMutation) SetName(s string) { - m.name = &s -} - -// Name returns the value of the "name" field in the mutation. -func (m *BackupSourceConfigMutation) Name() (r string, exists bool) { - v := m.name - if v == nil { - return - } - return *v, true -} - -// OldName returns the old "name" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupSourceConfigMutation) OldName(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldName is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldName requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldName: %w", err) - } - return oldValue.Name, nil -} - -// ResetName resets all changes to the "name" field. -func (m *BackupSourceConfigMutation) ResetName() { - m.name = nil -} - -// SetIsActive sets the "is_active" field. -func (m *BackupSourceConfigMutation) SetIsActive(b bool) { - m.is_active = &b -} - -// IsActive returns the value of the "is_active" field in the mutation. -func (m *BackupSourceConfigMutation) IsActive() (r bool, exists bool) { - v := m.is_active - if v == nil { - return - } - return *v, true -} - -// OldIsActive returns the old "is_active" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldIsActive(ctx context.Context) (v bool, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldIsActive is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldIsActive requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldIsActive: %w", err) - } - return oldValue.IsActive, nil -} - -// ResetIsActive resets all changes to the "is_active" field. -func (m *BackupSourceConfigMutation) ResetIsActive() { - m.is_active = nil -} - -// SetHost sets the "host" field. 
-func (m *BackupSourceConfigMutation) SetHost(s string) { - m.host = &s -} - -// Host returns the value of the "host" field in the mutation. -func (m *BackupSourceConfigMutation) Host() (r string, exists bool) { - v := m.host - if v == nil { - return - } - return *v, true -} - -// OldHost returns the old "host" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldHost(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldHost is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldHost requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldHost: %w", err) - } - return oldValue.Host, nil -} - -// ClearHost clears the value of the "host" field. -func (m *BackupSourceConfigMutation) ClearHost() { - m.host = nil - m.clearedFields[backupsourceconfig.FieldHost] = struct{}{} -} - -// HostCleared returns if the "host" field was cleared in this mutation. -func (m *BackupSourceConfigMutation) HostCleared() bool { - _, ok := m.clearedFields[backupsourceconfig.FieldHost] - return ok -} - -// ResetHost resets all changes to the "host" field. -func (m *BackupSourceConfigMutation) ResetHost() { - m.host = nil - delete(m.clearedFields, backupsourceconfig.FieldHost) -} - -// SetPort sets the "port" field. -func (m *BackupSourceConfigMutation) SetPort(i int) { - m.port = &i - m.addport = nil -} - -// Port returns the value of the "port" field in the mutation. 
-func (m *BackupSourceConfigMutation) Port() (r int, exists bool) { - v := m.port - if v == nil { - return - } - return *v, true -} - -// OldPort returns the old "port" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldPort(ctx context.Context) (v *int, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPort is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPort requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldPort: %w", err) - } - return oldValue.Port, nil -} - -// AddPort adds i to the "port" field. -func (m *BackupSourceConfigMutation) AddPort(i int) { - if m.addport != nil { - *m.addport += i - } else { - m.addport = &i - } -} - -// AddedPort returns the value that was added to the "port" field in this mutation. -func (m *BackupSourceConfigMutation) AddedPort() (r int, exists bool) { - v := m.addport - if v == nil { - return - } - return *v, true -} - -// ClearPort clears the value of the "port" field. -func (m *BackupSourceConfigMutation) ClearPort() { - m.port = nil - m.addport = nil - m.clearedFields[backupsourceconfig.FieldPort] = struct{}{} -} - -// PortCleared returns if the "port" field was cleared in this mutation. -func (m *BackupSourceConfigMutation) PortCleared() bool { - _, ok := m.clearedFields[backupsourceconfig.FieldPort] - return ok -} - -// ResetPort resets all changes to the "port" field. -func (m *BackupSourceConfigMutation) ResetPort() { - m.port = nil - m.addport = nil - delete(m.clearedFields, backupsourceconfig.FieldPort) -} - -// SetUsername sets the "username" field. 
-func (m *BackupSourceConfigMutation) SetUsername(s string) { - m.username = &s -} - -// Username returns the value of the "username" field in the mutation. -func (m *BackupSourceConfigMutation) Username() (r string, exists bool) { - v := m.username - if v == nil { - return - } - return *v, true -} - -// OldUsername returns the old "username" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldUsername(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUsername is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUsername requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldUsername: %w", err) - } - return oldValue.Username, nil -} - -// ClearUsername clears the value of the "username" field. -func (m *BackupSourceConfigMutation) ClearUsername() { - m.username = nil - m.clearedFields[backupsourceconfig.FieldUsername] = struct{}{} -} - -// UsernameCleared returns if the "username" field was cleared in this mutation. -func (m *BackupSourceConfigMutation) UsernameCleared() bool { - _, ok := m.clearedFields[backupsourceconfig.FieldUsername] - return ok -} - -// ResetUsername resets all changes to the "username" field. -func (m *BackupSourceConfigMutation) ResetUsername() { - m.username = nil - delete(m.clearedFields, backupsourceconfig.FieldUsername) -} - -// SetPasswordEncrypted sets the "password_encrypted" field. -func (m *BackupSourceConfigMutation) SetPasswordEncrypted(s string) { - m.password_encrypted = &s -} - -// PasswordEncrypted returns the value of the "password_encrypted" field in the mutation. 
-func (m *BackupSourceConfigMutation) PasswordEncrypted() (r string, exists bool) { - v := m.password_encrypted - if v == nil { - return - } - return *v, true -} - -// OldPasswordEncrypted returns the old "password_encrypted" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldPasswordEncrypted(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldPasswordEncrypted is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldPasswordEncrypted requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldPasswordEncrypted: %w", err) - } - return oldValue.PasswordEncrypted, nil -} - -// ClearPasswordEncrypted clears the value of the "password_encrypted" field. -func (m *BackupSourceConfigMutation) ClearPasswordEncrypted() { - m.password_encrypted = nil - m.clearedFields[backupsourceconfig.FieldPasswordEncrypted] = struct{}{} -} - -// PasswordEncryptedCleared returns if the "password_encrypted" field was cleared in this mutation. -func (m *BackupSourceConfigMutation) PasswordEncryptedCleared() bool { - _, ok := m.clearedFields[backupsourceconfig.FieldPasswordEncrypted] - return ok -} - -// ResetPasswordEncrypted resets all changes to the "password_encrypted" field. -func (m *BackupSourceConfigMutation) ResetPasswordEncrypted() { - m.password_encrypted = nil - delete(m.clearedFields, backupsourceconfig.FieldPasswordEncrypted) -} - -// SetDatabase sets the "database" field. -func (m *BackupSourceConfigMutation) SetDatabase(s string) { - m.database = &s -} - -// Database returns the value of the "database" field in the mutation. 
-func (m *BackupSourceConfigMutation) Database() (r string, exists bool) { - v := m.database - if v == nil { - return - } - return *v, true -} - -// OldDatabase returns the old "database" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldDatabase(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldDatabase is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldDatabase requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldDatabase: %w", err) - } - return oldValue.Database, nil -} - -// ClearDatabase clears the value of the "database" field. -func (m *BackupSourceConfigMutation) ClearDatabase() { - m.database = nil - m.clearedFields[backupsourceconfig.FieldDatabase] = struct{}{} -} - -// DatabaseCleared returns if the "database" field was cleared in this mutation. -func (m *BackupSourceConfigMutation) DatabaseCleared() bool { - _, ok := m.clearedFields[backupsourceconfig.FieldDatabase] - return ok -} - -// ResetDatabase resets all changes to the "database" field. -func (m *BackupSourceConfigMutation) ResetDatabase() { - m.database = nil - delete(m.clearedFields, backupsourceconfig.FieldDatabase) -} - -// SetSslMode sets the "ssl_mode" field. -func (m *BackupSourceConfigMutation) SetSslMode(s string) { - m.ssl_mode = &s -} - -// SslMode returns the value of the "ssl_mode" field in the mutation. -func (m *BackupSourceConfigMutation) SslMode() (r string, exists bool) { - v := m.ssl_mode - if v == nil { - return - } - return *v, true -} - -// OldSslMode returns the old "ssl_mode" field's value of the BackupSourceConfig entity. 
-// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldSslMode(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldSslMode is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldSslMode requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldSslMode: %w", err) - } - return oldValue.SslMode, nil -} - -// ClearSslMode clears the value of the "ssl_mode" field. -func (m *BackupSourceConfigMutation) ClearSslMode() { - m.ssl_mode = nil - m.clearedFields[backupsourceconfig.FieldSslMode] = struct{}{} -} - -// SslModeCleared returns if the "ssl_mode" field was cleared in this mutation. -func (m *BackupSourceConfigMutation) SslModeCleared() bool { - _, ok := m.clearedFields[backupsourceconfig.FieldSslMode] - return ok -} - -// ResetSslMode resets all changes to the "ssl_mode" field. -func (m *BackupSourceConfigMutation) ResetSslMode() { - m.ssl_mode = nil - delete(m.clearedFields, backupsourceconfig.FieldSslMode) -} - -// SetAddr sets the "addr" field. -func (m *BackupSourceConfigMutation) SetAddr(s string) { - m.addr = &s -} - -// Addr returns the value of the "addr" field in the mutation. -func (m *BackupSourceConfigMutation) Addr() (r string, exists bool) { - v := m.addr - if v == nil { - return - } - return *v, true -} - -// OldAddr returns the old "addr" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupSourceConfigMutation) OldAddr(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldAddr is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldAddr requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldAddr: %w", err) - } - return oldValue.Addr, nil -} - -// ClearAddr clears the value of the "addr" field. -func (m *BackupSourceConfigMutation) ClearAddr() { - m.addr = nil - m.clearedFields[backupsourceconfig.FieldAddr] = struct{}{} -} - -// AddrCleared returns if the "addr" field was cleared in this mutation. -func (m *BackupSourceConfigMutation) AddrCleared() bool { - _, ok := m.clearedFields[backupsourceconfig.FieldAddr] - return ok -} - -// ResetAddr resets all changes to the "addr" field. -func (m *BackupSourceConfigMutation) ResetAddr() { - m.addr = nil - delete(m.clearedFields, backupsourceconfig.FieldAddr) -} - -// SetRedisDb sets the "redis_db" field. -func (m *BackupSourceConfigMutation) SetRedisDb(i int) { - m.redis_db = &i - m.addredis_db = nil -} - -// RedisDb returns the value of the "redis_db" field in the mutation. -func (m *BackupSourceConfigMutation) RedisDb() (r int, exists bool) { - v := m.redis_db - if v == nil { - return - } - return *v, true -} - -// OldRedisDb returns the old "redis_db" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupSourceConfigMutation) OldRedisDb(ctx context.Context) (v *int, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldRedisDb is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldRedisDb requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldRedisDb: %w", err) - } - return oldValue.RedisDb, nil -} - -// AddRedisDb adds i to the "redis_db" field. -func (m *BackupSourceConfigMutation) AddRedisDb(i int) { - if m.addredis_db != nil { - *m.addredis_db += i - } else { - m.addredis_db = &i - } -} - -// AddedRedisDb returns the value that was added to the "redis_db" field in this mutation. -func (m *BackupSourceConfigMutation) AddedRedisDb() (r int, exists bool) { - v := m.addredis_db - if v == nil { - return - } - return *v, true -} - -// ClearRedisDb clears the value of the "redis_db" field. -func (m *BackupSourceConfigMutation) ClearRedisDb() { - m.redis_db = nil - m.addredis_db = nil - m.clearedFields[backupsourceconfig.FieldRedisDb] = struct{}{} -} - -// RedisDbCleared returns if the "redis_db" field was cleared in this mutation. -func (m *BackupSourceConfigMutation) RedisDbCleared() bool { - _, ok := m.clearedFields[backupsourceconfig.FieldRedisDb] - return ok -} - -// ResetRedisDb resets all changes to the "redis_db" field. -func (m *BackupSourceConfigMutation) ResetRedisDb() { - m.redis_db = nil - m.addredis_db = nil - delete(m.clearedFields, backupsourceconfig.FieldRedisDb) -} - -// SetContainerName sets the "container_name" field. -func (m *BackupSourceConfigMutation) SetContainerName(s string) { - m.container_name = &s -} - -// ContainerName returns the value of the "container_name" field in the mutation. 
-func (m *BackupSourceConfigMutation) ContainerName() (r string, exists bool) { - v := m.container_name - if v == nil { - return - } - return *v, true -} - -// OldContainerName returns the old "container_name" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldContainerName(ctx context.Context) (v string, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldContainerName is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldContainerName requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldContainerName: %w", err) - } - return oldValue.ContainerName, nil -} - -// ResetContainerName resets all changes to the "container_name" field. -func (m *BackupSourceConfigMutation) ResetContainerName() { - m.container_name = nil -} - -// SetCreatedAt sets the "created_at" field. -func (m *BackupSourceConfigMutation) SetCreatedAt(t time.Time) { - m.created_at = &t -} - -// CreatedAt returns the value of the "created_at" field in the mutation. -func (m *BackupSourceConfigMutation) CreatedAt() (r time.Time, exists bool) { - v := m.created_at - if v == nil { - return - } - return *v, true -} - -// OldCreatedAt returns the old "created_at" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
-func (m *BackupSourceConfigMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldCreatedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) - } - return oldValue.CreatedAt, nil -} - -// ResetCreatedAt resets all changes to the "created_at" field. -func (m *BackupSourceConfigMutation) ResetCreatedAt() { - m.created_at = nil -} - -// SetUpdatedAt sets the "updated_at" field. -func (m *BackupSourceConfigMutation) SetUpdatedAt(t time.Time) { - m.updated_at = &t -} - -// UpdatedAt returns the value of the "updated_at" field in the mutation. -func (m *BackupSourceConfigMutation) UpdatedAt() (r time.Time, exists bool) { - v := m.updated_at - if v == nil { - return - } - return *v, true -} - -// OldUpdatedAt returns the old "updated_at" field's value of the BackupSourceConfig entity. -// If the BackupSourceConfig object wasn't provided to the builder, the object is fetched from the database. -// An error is returned if the mutation operation is not UpdateOne, or the database query fails. -func (m *BackupSourceConfigMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { - if !m.op.Is(OpUpdateOne) { - return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") - } - if m.id == nil || m.oldValue == nil { - return v, errors.New("OldUpdatedAt requires an ID field in the mutation") - } - oldValue, err := m.oldValue(ctx) - if err != nil { - return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) - } - return oldValue.UpdatedAt, nil -} - -// ResetUpdatedAt resets all changes to the "updated_at" field. 
-func (m *BackupSourceConfigMutation) ResetUpdatedAt() { - m.updated_at = nil -} - -// Where appends a list predicates to the BackupSourceConfigMutation builder. -func (m *BackupSourceConfigMutation) Where(ps ...predicate.BackupSourceConfig) { - m.predicates = append(m.predicates, ps...) -} - -// WhereP appends storage-level predicates to the BackupSourceConfigMutation builder. Using this method, -// users can use type-assertion to append predicates that do not depend on any generated package. -func (m *BackupSourceConfigMutation) WhereP(ps ...func(*sql.Selector)) { - p := make([]predicate.BackupSourceConfig, len(ps)) - for i := range ps { - p[i] = ps[i] - } - m.Where(p...) -} - -// Op returns the operation name. -func (m *BackupSourceConfigMutation) Op() Op { - return m.op -} - -// SetOp allows setting the mutation operation. -func (m *BackupSourceConfigMutation) SetOp(op Op) { - m.op = op -} - -// Type returns the node type of this mutation (BackupSourceConfig). -func (m *BackupSourceConfigMutation) Type() string { - return m.typ -} - -// Fields returns all fields that were changed during this mutation. Note that in -// order to get all numeric fields that were incremented/decremented, call -// AddedFields(). 
-func (m *BackupSourceConfigMutation) Fields() []string { - fields := make([]string, 0, 15) - if m.source_type != nil { - fields = append(fields, backupsourceconfig.FieldSourceType) - } - if m.profile_id != nil { - fields = append(fields, backupsourceconfig.FieldProfileID) - } - if m.name != nil { - fields = append(fields, backupsourceconfig.FieldName) - } - if m.is_active != nil { - fields = append(fields, backupsourceconfig.FieldIsActive) - } - if m.host != nil { - fields = append(fields, backupsourceconfig.FieldHost) - } - if m.port != nil { - fields = append(fields, backupsourceconfig.FieldPort) - } - if m.username != nil { - fields = append(fields, backupsourceconfig.FieldUsername) - } - if m.password_encrypted != nil { - fields = append(fields, backupsourceconfig.FieldPasswordEncrypted) - } - if m.database != nil { - fields = append(fields, backupsourceconfig.FieldDatabase) - } - if m.ssl_mode != nil { - fields = append(fields, backupsourceconfig.FieldSslMode) - } - if m.addr != nil { - fields = append(fields, backupsourceconfig.FieldAddr) - } - if m.redis_db != nil { - fields = append(fields, backupsourceconfig.FieldRedisDb) - } - if m.container_name != nil { - fields = append(fields, backupsourceconfig.FieldContainerName) - } - if m.created_at != nil { - fields = append(fields, backupsourceconfig.FieldCreatedAt) - } - if m.updated_at != nil { - fields = append(fields, backupsourceconfig.FieldUpdatedAt) - } - return fields -} - -// Field returns the value of a field with the given name. The second boolean -// return value indicates that this field was not set, or was not defined in the -// schema. 
-func (m *BackupSourceConfigMutation) Field(name string) (ent.Value, bool) { - switch name { - case backupsourceconfig.FieldSourceType: - return m.SourceType() - case backupsourceconfig.FieldProfileID: - return m.ProfileID() - case backupsourceconfig.FieldName: - return m.Name() - case backupsourceconfig.FieldIsActive: - return m.IsActive() - case backupsourceconfig.FieldHost: - return m.Host() - case backupsourceconfig.FieldPort: - return m.Port() - case backupsourceconfig.FieldUsername: - return m.Username() - case backupsourceconfig.FieldPasswordEncrypted: - return m.PasswordEncrypted() - case backupsourceconfig.FieldDatabase: - return m.Database() - case backupsourceconfig.FieldSslMode: - return m.SslMode() - case backupsourceconfig.FieldAddr: - return m.Addr() - case backupsourceconfig.FieldRedisDb: - return m.RedisDb() - case backupsourceconfig.FieldContainerName: - return m.ContainerName() - case backupsourceconfig.FieldCreatedAt: - return m.CreatedAt() - case backupsourceconfig.FieldUpdatedAt: - return m.UpdatedAt() - } - return nil, false -} - -// OldField returns the old value of the field from the database. An error is -// returned if the mutation operation is not UpdateOne, or the query to the -// database failed. 
-func (m *BackupSourceConfigMutation) OldField(ctx context.Context, name string) (ent.Value, error) { - switch name { - case backupsourceconfig.FieldSourceType: - return m.OldSourceType(ctx) - case backupsourceconfig.FieldProfileID: - return m.OldProfileID(ctx) - case backupsourceconfig.FieldName: - return m.OldName(ctx) - case backupsourceconfig.FieldIsActive: - return m.OldIsActive(ctx) - case backupsourceconfig.FieldHost: - return m.OldHost(ctx) - case backupsourceconfig.FieldPort: - return m.OldPort(ctx) - case backupsourceconfig.FieldUsername: - return m.OldUsername(ctx) - case backupsourceconfig.FieldPasswordEncrypted: - return m.OldPasswordEncrypted(ctx) - case backupsourceconfig.FieldDatabase: - return m.OldDatabase(ctx) - case backupsourceconfig.FieldSslMode: - return m.OldSslMode(ctx) - case backupsourceconfig.FieldAddr: - return m.OldAddr(ctx) - case backupsourceconfig.FieldRedisDb: - return m.OldRedisDb(ctx) - case backupsourceconfig.FieldContainerName: - return m.OldContainerName(ctx) - case backupsourceconfig.FieldCreatedAt: - return m.OldCreatedAt(ctx) - case backupsourceconfig.FieldUpdatedAt: - return m.OldUpdatedAt(ctx) - } - return nil, fmt.Errorf("unknown BackupSourceConfig field %s", name) -} - -// SetField sets the value of a field with the given name. It returns an error if -// the field is not defined in the schema, or if the type mismatched the field -// type. 
-func (m *BackupSourceConfigMutation) SetField(name string, value ent.Value) error { - switch name { - case backupsourceconfig.FieldSourceType: - v, ok := value.(backupsourceconfig.SourceType) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetSourceType(v) - return nil - case backupsourceconfig.FieldProfileID: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetProfileID(v) - return nil - case backupsourceconfig.FieldName: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetName(v) - return nil - case backupsourceconfig.FieldIsActive: - v, ok := value.(bool) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetIsActive(v) - return nil - case backupsourceconfig.FieldHost: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetHost(v) - return nil - case backupsourceconfig.FieldPort: - v, ok := value.(int) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetPort(v) - return nil - case backupsourceconfig.FieldUsername: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetUsername(v) - return nil - case backupsourceconfig.FieldPasswordEncrypted: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetPasswordEncrypted(v) - return nil - case backupsourceconfig.FieldDatabase: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetDatabase(v) - return nil - case backupsourceconfig.FieldSslMode: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetSslMode(v) - return nil - case backupsourceconfig.FieldAddr: - v, ok := value.(string) 
- if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetAddr(v) - return nil - case backupsourceconfig.FieldRedisDb: - v, ok := value.(int) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetRedisDb(v) - return nil - case backupsourceconfig.FieldContainerName: - v, ok := value.(string) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetContainerName(v) - return nil - case backupsourceconfig.FieldCreatedAt: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetCreatedAt(v) - return nil - case backupsourceconfig.FieldUpdatedAt: - v, ok := value.(time.Time) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.SetUpdatedAt(v) - return nil - } - return fmt.Errorf("unknown BackupSourceConfig field %s", name) -} - -// AddedFields returns all numeric fields that were incremented/decremented during -// this mutation. -func (m *BackupSourceConfigMutation) AddedFields() []string { - var fields []string - if m.addport != nil { - fields = append(fields, backupsourceconfig.FieldPort) - } - if m.addredis_db != nil { - fields = append(fields, backupsourceconfig.FieldRedisDb) - } - return fields -} - -// AddedField returns the numeric value that was incremented/decremented on a field -// with the given name. The second boolean return value indicates that this field -// was not set, or was not defined in the schema. -func (m *BackupSourceConfigMutation) AddedField(name string) (ent.Value, bool) { - switch name { - case backupsourceconfig.FieldPort: - return m.AddedPort() - case backupsourceconfig.FieldRedisDb: - return m.AddedRedisDb() - } - return nil, false -} - -// AddField adds the value to the field with the given name. It returns an error if -// the field is not defined in the schema, or if the type mismatched the field -// type. 
-func (m *BackupSourceConfigMutation) AddField(name string, value ent.Value) error { - switch name { - case backupsourceconfig.FieldPort: - v, ok := value.(int) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.AddPort(v) - return nil - case backupsourceconfig.FieldRedisDb: - v, ok := value.(int) - if !ok { - return fmt.Errorf("unexpected type %T for field %s", value, name) - } - m.AddRedisDb(v) - return nil - } - return fmt.Errorf("unknown BackupSourceConfig numeric field %s", name) -} - -// ClearedFields returns all nullable fields that were cleared during this -// mutation. -func (m *BackupSourceConfigMutation) ClearedFields() []string { - var fields []string - if m.FieldCleared(backupsourceconfig.FieldHost) { - fields = append(fields, backupsourceconfig.FieldHost) - } - if m.FieldCleared(backupsourceconfig.FieldPort) { - fields = append(fields, backupsourceconfig.FieldPort) - } - if m.FieldCleared(backupsourceconfig.FieldUsername) { - fields = append(fields, backupsourceconfig.FieldUsername) - } - if m.FieldCleared(backupsourceconfig.FieldPasswordEncrypted) { - fields = append(fields, backupsourceconfig.FieldPasswordEncrypted) - } - if m.FieldCleared(backupsourceconfig.FieldDatabase) { - fields = append(fields, backupsourceconfig.FieldDatabase) - } - if m.FieldCleared(backupsourceconfig.FieldSslMode) { - fields = append(fields, backupsourceconfig.FieldSslMode) - } - if m.FieldCleared(backupsourceconfig.FieldAddr) { - fields = append(fields, backupsourceconfig.FieldAddr) - } - if m.FieldCleared(backupsourceconfig.FieldRedisDb) { - fields = append(fields, backupsourceconfig.FieldRedisDb) - } - return fields -} - -// FieldCleared returns a boolean indicating if a field with the given name was -// cleared in this mutation. -func (m *BackupSourceConfigMutation) FieldCleared(name string) bool { - _, ok := m.clearedFields[name] - return ok -} - -// ClearField clears the value of the field with the given name. 
It returns an -// error if the field is not defined in the schema. -func (m *BackupSourceConfigMutation) ClearField(name string) error { - switch name { - case backupsourceconfig.FieldHost: - m.ClearHost() - return nil - case backupsourceconfig.FieldPort: - m.ClearPort() - return nil - case backupsourceconfig.FieldUsername: - m.ClearUsername() - return nil - case backupsourceconfig.FieldPasswordEncrypted: - m.ClearPasswordEncrypted() - return nil - case backupsourceconfig.FieldDatabase: - m.ClearDatabase() - return nil - case backupsourceconfig.FieldSslMode: - m.ClearSslMode() - return nil - case backupsourceconfig.FieldAddr: - m.ClearAddr() - return nil - case backupsourceconfig.FieldRedisDb: - m.ClearRedisDb() - return nil - } - return fmt.Errorf("unknown BackupSourceConfig nullable field %s", name) -} - -// ResetField resets all changes in the mutation for the field with the given name. -// It returns an error if the field is not defined in the schema. -func (m *BackupSourceConfigMutation) ResetField(name string) error { - switch name { - case backupsourceconfig.FieldSourceType: - m.ResetSourceType() - return nil - case backupsourceconfig.FieldProfileID: - m.ResetProfileID() - return nil - case backupsourceconfig.FieldName: - m.ResetName() - return nil - case backupsourceconfig.FieldIsActive: - m.ResetIsActive() - return nil - case backupsourceconfig.FieldHost: - m.ResetHost() - return nil - case backupsourceconfig.FieldPort: - m.ResetPort() - return nil - case backupsourceconfig.FieldUsername: - m.ResetUsername() - return nil - case backupsourceconfig.FieldPasswordEncrypted: - m.ResetPasswordEncrypted() - return nil - case backupsourceconfig.FieldDatabase: - m.ResetDatabase() - return nil - case backupsourceconfig.FieldSslMode: - m.ResetSslMode() - return nil - case backupsourceconfig.FieldAddr: - m.ResetAddr() - return nil - case backupsourceconfig.FieldRedisDb: - m.ResetRedisDb() - return nil - case backupsourceconfig.FieldContainerName: - 
m.ResetContainerName() - return nil - case backupsourceconfig.FieldCreatedAt: - m.ResetCreatedAt() - return nil - case backupsourceconfig.FieldUpdatedAt: - m.ResetUpdatedAt() - return nil - } - return fmt.Errorf("unknown BackupSourceConfig field %s", name) -} - -// AddedEdges returns all edge names that were set/added in this mutation. -func (m *BackupSourceConfigMutation) AddedEdges() []string { - edges := make([]string, 0, 0) - return edges -} - -// AddedIDs returns all IDs (to other nodes) that were added for the given edge -// name in this mutation. -func (m *BackupSourceConfigMutation) AddedIDs(name string) []ent.Value { - return nil -} - -// RemovedEdges returns all edge names that were removed in this mutation. -func (m *BackupSourceConfigMutation) RemovedEdges() []string { - edges := make([]string, 0, 0) - return edges -} - -// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with -// the given name in this mutation. -func (m *BackupSourceConfigMutation) RemovedIDs(name string) []ent.Value { - return nil -} - -// ClearedEdges returns all edge names that were cleared in this mutation. -func (m *BackupSourceConfigMutation) ClearedEdges() []string { - edges := make([]string, 0, 0) - return edges -} - -// EdgeCleared returns a boolean which indicates if the edge with the given name -// was cleared in this mutation. -func (m *BackupSourceConfigMutation) EdgeCleared(name string) bool { - return false -} - -// ClearEdge clears the value of the edge with the given name. It returns an error -// if that edge is not defined in the schema. -func (m *BackupSourceConfigMutation) ClearEdge(name string) error { - return fmt.Errorf("unknown BackupSourceConfig unique edge %s", name) -} - -// ResetEdge resets all changes to the edge with the given name in this mutation. -// It returns an error if the edge is not defined in the schema. 
-func (m *BackupSourceConfigMutation) ResetEdge(name string) error { - return fmt.Errorf("unknown BackupSourceConfig edge %s", name) -} diff --git a/backup/ent/predicate/predicate.go b/backup/ent/predicate/predicate.go deleted file mode 100644 index e1f1ad927..000000000 --- a/backup/ent/predicate/predicate.go +++ /dev/null @@ -1,22 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package predicate - -import ( - "entgo.io/ent/dialect/sql" -) - -// BackupJob is the predicate function for backupjob builders. -type BackupJob func(*sql.Selector) - -// BackupJobEvent is the predicate function for backupjobevent builders. -type BackupJobEvent func(*sql.Selector) - -// BackupS3Config is the predicate function for backups3config builders. -type BackupS3Config func(*sql.Selector) - -// BackupSetting is the predicate function for backupsetting builders. -type BackupSetting func(*sql.Selector) - -// BackupSourceConfig is the predicate function for backupsourceconfig builders. -type BackupSourceConfig func(*sql.Selector) diff --git a/backup/ent/runtime.go b/backup/ent/runtime.go deleted file mode 100644 index 1d5ab9a13..000000000 --- a/backup/ent/runtime.go +++ /dev/null @@ -1,158 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "time" - - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" - "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" - "github.com/Wei-Shaw/sub2api/backup/ent/schema" -) - -// The init function reads all schema descriptors with runtime code -// (default values, validators, hooks and policies) and stitches it -// to their package variables. -func init() { - backupjobFields := schema.BackupJob{}.Fields() - _ = backupjobFields - // backupjobDescTriggeredBy is the schema descriptor for triggered_by field. 
- backupjobDescTriggeredBy := backupjobFields[3].Descriptor() - // backupjob.DefaultTriggeredBy holds the default value on creation for the triggered_by field. - backupjob.DefaultTriggeredBy = backupjobDescTriggeredBy.Default.(string) - // backupjobDescUploadToS3 is the schema descriptor for upload_to_s3 field. - backupjobDescUploadToS3 := backupjobFields[5].Descriptor() - // backupjob.DefaultUploadToS3 holds the default value on creation for the upload_to_s3 field. - backupjob.DefaultUploadToS3 = backupjobDescUploadToS3.Default.(bool) - // backupjobDescCreatedAt is the schema descriptor for created_at field. - backupjobDescCreatedAt := backupjobFields[18].Descriptor() - // backupjob.DefaultCreatedAt holds the default value on creation for the created_at field. - backupjob.DefaultCreatedAt = backupjobDescCreatedAt.Default.(func() time.Time) - // backupjobDescUpdatedAt is the schema descriptor for updated_at field. - backupjobDescUpdatedAt := backupjobFields[19].Descriptor() - // backupjob.DefaultUpdatedAt holds the default value on creation for the updated_at field. - backupjob.DefaultUpdatedAt = backupjobDescUpdatedAt.Default.(func() time.Time) - // backupjob.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. - backupjob.UpdateDefaultUpdatedAt = backupjobDescUpdatedAt.UpdateDefault.(func() time.Time) - backupjobeventFields := schema.BackupJobEvent{}.Fields() - _ = backupjobeventFields - // backupjobeventDescEventType is the schema descriptor for event_type field. - backupjobeventDescEventType := backupjobeventFields[2].Descriptor() - // backupjobevent.DefaultEventType holds the default value on creation for the event_type field. - backupjobevent.DefaultEventType = backupjobeventDescEventType.Default.(string) - // backupjobeventDescEventTime is the schema descriptor for event_time field. 
- backupjobeventDescEventTime := backupjobeventFields[5].Descriptor() - // backupjobevent.DefaultEventTime holds the default value on creation for the event_time field. - backupjobevent.DefaultEventTime = backupjobeventDescEventTime.Default.(func() time.Time) - // backupjobeventDescCreatedAt is the schema descriptor for created_at field. - backupjobeventDescCreatedAt := backupjobeventFields[6].Descriptor() - // backupjobevent.DefaultCreatedAt holds the default value on creation for the created_at field. - backupjobevent.DefaultCreatedAt = backupjobeventDescCreatedAt.Default.(func() time.Time) - backups3configFields := schema.BackupS3Config{}.Fields() - _ = backups3configFields - // backups3configDescProfileID is the schema descriptor for profile_id field. - backups3configDescProfileID := backups3configFields[0].Descriptor() - // backups3config.DefaultProfileID holds the default value on creation for the profile_id field. - backups3config.DefaultProfileID = backups3configDescProfileID.Default.(string) - // backups3configDescName is the schema descriptor for name field. - backups3configDescName := backups3configFields[1].Descriptor() - // backups3config.DefaultName holds the default value on creation for the name field. - backups3config.DefaultName = backups3configDescName.Default.(string) - // backups3configDescIsActive is the schema descriptor for is_active field. - backups3configDescIsActive := backups3configFields[2].Descriptor() - // backups3config.DefaultIsActive holds the default value on creation for the is_active field. - backups3config.DefaultIsActive = backups3configDescIsActive.Default.(bool) - // backups3configDescEnabled is the schema descriptor for enabled field. - backups3configDescEnabled := backups3configFields[3].Descriptor() - // backups3config.DefaultEnabled holds the default value on creation for the enabled field. 
- backups3config.DefaultEnabled = backups3configDescEnabled.Default.(bool) - // backups3configDescEndpoint is the schema descriptor for endpoint field. - backups3configDescEndpoint := backups3configFields[4].Descriptor() - // backups3config.DefaultEndpoint holds the default value on creation for the endpoint field. - backups3config.DefaultEndpoint = backups3configDescEndpoint.Default.(string) - // backups3configDescRegion is the schema descriptor for region field. - backups3configDescRegion := backups3configFields[5].Descriptor() - // backups3config.DefaultRegion holds the default value on creation for the region field. - backups3config.DefaultRegion = backups3configDescRegion.Default.(string) - // backups3configDescBucket is the schema descriptor for bucket field. - backups3configDescBucket := backups3configFields[6].Descriptor() - // backups3config.DefaultBucket holds the default value on creation for the bucket field. - backups3config.DefaultBucket = backups3configDescBucket.Default.(string) - // backups3configDescAccessKeyID is the schema descriptor for access_key_id field. - backups3configDescAccessKeyID := backups3configFields[7].Descriptor() - // backups3config.DefaultAccessKeyID holds the default value on creation for the access_key_id field. - backups3config.DefaultAccessKeyID = backups3configDescAccessKeyID.Default.(string) - // backups3configDescPrefix is the schema descriptor for prefix field. - backups3configDescPrefix := backups3configFields[9].Descriptor() - // backups3config.DefaultPrefix holds the default value on creation for the prefix field. - backups3config.DefaultPrefix = backups3configDescPrefix.Default.(string) - // backups3configDescForcePathStyle is the schema descriptor for force_path_style field. - backups3configDescForcePathStyle := backups3configFields[10].Descriptor() - // backups3config.DefaultForcePathStyle holds the default value on creation for the force_path_style field. 
- backups3config.DefaultForcePathStyle = backups3configDescForcePathStyle.Default.(bool) - // backups3configDescUseSsl is the schema descriptor for use_ssl field. - backups3configDescUseSsl := backups3configFields[11].Descriptor() - // backups3config.DefaultUseSsl holds the default value on creation for the use_ssl field. - backups3config.DefaultUseSsl = backups3configDescUseSsl.Default.(bool) - // backups3configDescCreatedAt is the schema descriptor for created_at field. - backups3configDescCreatedAt := backups3configFields[12].Descriptor() - // backups3config.DefaultCreatedAt holds the default value on creation for the created_at field. - backups3config.DefaultCreatedAt = backups3configDescCreatedAt.Default.(func() time.Time) - // backups3configDescUpdatedAt is the schema descriptor for updated_at field. - backups3configDescUpdatedAt := backups3configFields[13].Descriptor() - // backups3config.DefaultUpdatedAt holds the default value on creation for the updated_at field. - backups3config.DefaultUpdatedAt = backups3configDescUpdatedAt.Default.(func() time.Time) - // backups3config.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. - backups3config.UpdateDefaultUpdatedAt = backups3configDescUpdatedAt.UpdateDefault.(func() time.Time) - backupsettingFields := schema.BackupSetting{}.Fields() - _ = backupsettingFields - // backupsettingDescBackupRoot is the schema descriptor for backup_root field. - backupsettingDescBackupRoot := backupsettingFields[1].Descriptor() - // backupsetting.DefaultBackupRoot holds the default value on creation for the backup_root field. - backupsetting.DefaultBackupRoot = backupsettingDescBackupRoot.Default.(string) - // backupsettingDescRetentionDays is the schema descriptor for retention_days field. - backupsettingDescRetentionDays := backupsettingFields[2].Descriptor() - // backupsetting.DefaultRetentionDays holds the default value on creation for the retention_days field. 
- backupsetting.DefaultRetentionDays = backupsettingDescRetentionDays.Default.(int) - // backupsettingDescKeepLast is the schema descriptor for keep_last field. - backupsettingDescKeepLast := backupsettingFields[3].Descriptor() - // backupsetting.DefaultKeepLast holds the default value on creation for the keep_last field. - backupsetting.DefaultKeepLast = backupsettingDescKeepLast.Default.(int) - // backupsettingDescSqlitePath is the schema descriptor for sqlite_path field. - backupsettingDescSqlitePath := backupsettingFields[4].Descriptor() - // backupsetting.DefaultSqlitePath holds the default value on creation for the sqlite_path field. - backupsetting.DefaultSqlitePath = backupsettingDescSqlitePath.Default.(string) - // backupsettingDescCreatedAt is the schema descriptor for created_at field. - backupsettingDescCreatedAt := backupsettingFields[5].Descriptor() - // backupsetting.DefaultCreatedAt holds the default value on creation for the created_at field. - backupsetting.DefaultCreatedAt = backupsettingDescCreatedAt.Default.(func() time.Time) - // backupsettingDescUpdatedAt is the schema descriptor for updated_at field. - backupsettingDescUpdatedAt := backupsettingFields[6].Descriptor() - // backupsetting.DefaultUpdatedAt holds the default value on creation for the updated_at field. - backupsetting.DefaultUpdatedAt = backupsettingDescUpdatedAt.Default.(func() time.Time) - // backupsetting.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. - backupsetting.UpdateDefaultUpdatedAt = backupsettingDescUpdatedAt.UpdateDefault.(func() time.Time) - backupsourceconfigFields := schema.BackupSourceConfig{}.Fields() - _ = backupsourceconfigFields - // backupsourceconfigDescIsActive is the schema descriptor for is_active field. - backupsourceconfigDescIsActive := backupsourceconfigFields[3].Descriptor() - // backupsourceconfig.DefaultIsActive holds the default value on creation for the is_active field. 
- backupsourceconfig.DefaultIsActive = backupsourceconfigDescIsActive.Default.(bool) - // backupsourceconfigDescContainerName is the schema descriptor for container_name field. - backupsourceconfigDescContainerName := backupsourceconfigFields[12].Descriptor() - // backupsourceconfig.DefaultContainerName holds the default value on creation for the container_name field. - backupsourceconfig.DefaultContainerName = backupsourceconfigDescContainerName.Default.(string) - // backupsourceconfigDescCreatedAt is the schema descriptor for created_at field. - backupsourceconfigDescCreatedAt := backupsourceconfigFields[13].Descriptor() - // backupsourceconfig.DefaultCreatedAt holds the default value on creation for the created_at field. - backupsourceconfig.DefaultCreatedAt = backupsourceconfigDescCreatedAt.Default.(func() time.Time) - // backupsourceconfigDescUpdatedAt is the schema descriptor for updated_at field. - backupsourceconfigDescUpdatedAt := backupsourceconfigFields[14].Descriptor() - // backupsourceconfig.DefaultUpdatedAt holds the default value on creation for the updated_at field. - backupsourceconfig.DefaultUpdatedAt = backupsourceconfigDescUpdatedAt.Default.(func() time.Time) - // backupsourceconfig.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. - backupsourceconfig.UpdateDefaultUpdatedAt = backupsourceconfigDescUpdatedAt.UpdateDefault.(func() time.Time) -} diff --git a/backup/ent/runtime/runtime.go b/backup/ent/runtime/runtime.go deleted file mode 100644 index 98973c65a..000000000 --- a/backup/ent/runtime/runtime.go +++ /dev/null @@ -1,10 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package runtime - -// The schema-stitching logic is generated in github.com/Wei-Shaw/sub2api/backup/ent/runtime.go - -const ( - Version = "v0.14.5" // Version of ent codegen. - Sum = "h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4=" // Sum of ent codegen. 
-) diff --git a/backup/ent/schema/backup_job.go b/backup/ent/schema/backup_job.go deleted file mode 100644 index 39f023a4a..000000000 --- a/backup/ent/schema/backup_job.go +++ /dev/null @@ -1,56 +0,0 @@ -package schema - -import ( - "time" - - "entgo.io/ent" - "entgo.io/ent/schema/edge" - "entgo.io/ent/schema/field" - "entgo.io/ent/schema/index" -) - -type BackupJob struct { - ent.Schema -} - -func (BackupJob) Fields() []ent.Field { - return []ent.Field{ - field.String("job_id").Unique(), - field.Enum("backup_type").Values("postgres", "redis", "full"), - field.Enum("status").Values("queued", "running", "succeeded", "failed", "partial_succeeded").Default("queued"), - field.String("triggered_by").Default("system"), - field.String("idempotency_key").Optional(), - field.Bool("upload_to_s3").Default(false), - field.String("s3_profile_id").Optional(), - field.String("postgres_profile_id").Optional(), - field.String("redis_profile_id").Optional(), - field.Time("started_at").Optional().Nillable(), - field.Time("finished_at").Optional().Nillable(), - field.String("error_message").Optional(), - field.String("artifact_local_path").Optional(), - field.Int64("artifact_size_bytes").Optional().Nillable(), - field.String("artifact_sha256").Optional(), - field.String("s3_bucket").Optional(), - field.String("s3_key").Optional(), - field.String("s3_etag").Optional(), - field.Time("created_at").Default(time.Now).Immutable(), - field.Time("updated_at").Default(time.Now).UpdateDefault(time.Now), - } -} - -func (BackupJob) Edges() []ent.Edge { - return []ent.Edge{ - edge.From("events", BackupJobEvent.Type).Ref("job"), - } -} - -func (BackupJob) Indexes() []ent.Index { - return []ent.Index{ - index.Fields("status", "created_at"), - index.Fields("backup_type", "created_at"), - index.Fields("idempotency_key"), - index.Fields("s3_profile_id", "status"), - index.Fields("postgres_profile_id", "status"), - index.Fields("redis_profile_id", "status"), - } -} diff --git 
a/backup/ent/schema/backup_job_event.go b/backup/ent/schema/backup_job_event.go deleted file mode 100644 index b4804ad7a..000000000 --- a/backup/ent/schema/backup_job_event.go +++ /dev/null @@ -1,38 +0,0 @@ -package schema - -import ( - "time" - - "entgo.io/ent" - "entgo.io/ent/schema/edge" - "entgo.io/ent/schema/field" - "entgo.io/ent/schema/index" -) - -type BackupJobEvent struct { - ent.Schema -} - -func (BackupJobEvent) Fields() []ent.Field { - return []ent.Field{ - field.Int("backup_job_id"), - field.Enum("level").Values("info", "warning", "error").Default("info"), - field.String("event_type").Default("state_change"), - field.String("message"), - field.String("payload").Optional(), - field.Time("event_time").Default(time.Now), - field.Time("created_at").Default(time.Now).Immutable(), - } -} - -func (BackupJobEvent) Edges() []ent.Edge { - return []ent.Edge{ - edge.To("job", BackupJob.Type).Field("backup_job_id").Unique().Required(), - } -} - -func (BackupJobEvent) Indexes() []ent.Index { - return []ent.Index{ - index.Fields("backup_job_id", "event_time"), - } -} diff --git a/backup/ent/schema/backup_s3_config.go b/backup/ent/schema/backup_s3_config.go deleted file mode 100644 index c4b5c1ad0..000000000 --- a/backup/ent/schema/backup_s3_config.go +++ /dev/null @@ -1,39 +0,0 @@ -package schema - -import ( - "time" - - "entgo.io/ent" - "entgo.io/ent/schema/field" - "entgo.io/ent/schema/index" -) - -type BackupS3Config struct { - ent.Schema -} - -func (BackupS3Config) Fields() []ent.Field { - return []ent.Field{ - field.String("profile_id").Default("default"), - field.String("name").Default("默认账号"), - field.Bool("is_active").Default(false), - field.Bool("enabled").Default(false), - field.String("endpoint").Default(""), - field.String("region").Default(""), - field.String("bucket").Default(""), - field.String("access_key_id").Default(""), - field.String("secret_access_key_encrypted").Optional().Sensitive(), - field.String("prefix").Default(""), - 
field.Bool("force_path_style").Default(false), - field.Bool("use_ssl").Default(true), - field.Time("created_at").Default(time.Now).Immutable(), - field.Time("updated_at").Default(time.Now).UpdateDefault(time.Now), - } -} - -func (BackupS3Config) Indexes() []ent.Index { - return []ent.Index{ - index.Fields("profile_id").Unique(), - index.Fields("is_active"), - } -} diff --git a/backup/ent/schema/backup_setting.go b/backup/ent/schema/backup_setting.go deleted file mode 100644 index 5ddb69283..000000000 --- a/backup/ent/schema/backup_setting.go +++ /dev/null @@ -1,24 +0,0 @@ -package schema - -import ( - "time" - - "entgo.io/ent" - "entgo.io/ent/schema/field" -) - -type BackupSetting struct { - ent.Schema -} - -func (BackupSetting) Fields() []ent.Field { - return []ent.Field{ - field.Enum("source_mode").Values("direct", "docker_exec").Default("direct"), - field.String("backup_root").Default("/var/lib/sub2api/backups"), - field.Int("retention_days").Default(7), - field.Int("keep_last").Default(30), - field.String("sqlite_path").Default("/var/lib/sub2api/backupd.db"), - field.Time("created_at").Default(time.Now).Immutable(), - field.Time("updated_at").Default(time.Now).UpdateDefault(time.Now), - } -} diff --git a/backup/ent/schema/backup_source_config.go b/backup/ent/schema/backup_source_config.go deleted file mode 100644 index f399d4123..000000000 --- a/backup/ent/schema/backup_source_config.go +++ /dev/null @@ -1,40 +0,0 @@ -package schema - -import ( - "time" - - "entgo.io/ent" - "entgo.io/ent/schema/field" - "entgo.io/ent/schema/index" -) - -type BackupSourceConfig struct { - ent.Schema -} - -func (BackupSourceConfig) Fields() []ent.Field { - return []ent.Field{ - field.Enum("source_type").Values("postgres", "redis"), - field.String("profile_id"), - field.String("name"), - field.Bool("is_active").Default(false), - field.String("host").Optional(), - field.Int("port").Optional().Nillable(), - field.String("username").Optional(), - 
field.String("password_encrypted").Optional().Sensitive(), - field.String("database").Optional(), - field.String("ssl_mode").Optional(), - field.String("addr").Optional(), - field.Int("redis_db").Optional().Nillable(), - field.String("container_name").Default(""), - field.Time("created_at").Default(time.Now).Immutable(), - field.Time("updated_at").Default(time.Now).UpdateDefault(time.Now), - } -} - -func (BackupSourceConfig) Indexes() []ent.Index { - return []ent.Index{ - index.Fields("source_type", "profile_id").Unique(), - index.Fields("source_type", "is_active"), - } -} diff --git a/backup/ent/tx.go b/backup/ent/tx.go deleted file mode 100644 index ace341489..000000000 --- a/backup/ent/tx.go +++ /dev/null @@ -1,222 +0,0 @@ -// Code generated by ent, DO NOT EDIT. - -package ent - -import ( - "context" - "sync" - - "entgo.io/ent/dialect" -) - -// Tx is a transactional client that is created by calling Client.Tx(). -type Tx struct { - config - // BackupJob is the client for interacting with the BackupJob builders. - BackupJob *BackupJobClient - // BackupJobEvent is the client for interacting with the BackupJobEvent builders. - BackupJobEvent *BackupJobEventClient - // BackupS3Config is the client for interacting with the BackupS3Config builders. - BackupS3Config *BackupS3ConfigClient - // BackupSetting is the client for interacting with the BackupSetting builders. - BackupSetting *BackupSettingClient - // BackupSourceConfig is the client for interacting with the BackupSourceConfig builders. - BackupSourceConfig *BackupSourceConfigClient - - // lazily loaded. - client *Client - clientOnce sync.Once - // ctx lives for the life of the transaction. It is - // the same context used by the underlying connection. - ctx context.Context -} - -type ( - // Committer is the interface that wraps the Commit method. - Committer interface { - Commit(context.Context, *Tx) error - } - - // The CommitFunc type is an adapter to allow the use of ordinary - // function as a Committer. 
If f is a function with the appropriate - // signature, CommitFunc(f) is a Committer that calls f. - CommitFunc func(context.Context, *Tx) error - - // CommitHook defines the "commit middleware". A function that gets a Committer - // and returns a Committer. For example: - // - // hook := func(next ent.Committer) ent.Committer { - // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { - // // Do some stuff before. - // if err := next.Commit(ctx, tx); err != nil { - // return err - // } - // // Do some stuff after. - // return nil - // }) - // } - // - CommitHook func(Committer) Committer -) - -// Commit calls f(ctx, m). -func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { - return f(ctx, tx) -} - -// Commit commits the transaction. -func (tx *Tx) Commit() error { - txDriver := tx.config.driver.(*txDriver) - var fn Committer = CommitFunc(func(context.Context, *Tx) error { - return txDriver.tx.Commit() - }) - txDriver.mu.Lock() - hooks := append([]CommitHook(nil), txDriver.onCommit...) - txDriver.mu.Unlock() - for i := len(hooks) - 1; i >= 0; i-- { - fn = hooks[i](fn) - } - return fn.Commit(tx.ctx, tx) -} - -// OnCommit adds a hook to call on commit. -func (tx *Tx) OnCommit(f CommitHook) { - txDriver := tx.config.driver.(*txDriver) - txDriver.mu.Lock() - txDriver.onCommit = append(txDriver.onCommit, f) - txDriver.mu.Unlock() -} - -type ( - // Rollbacker is the interface that wraps the Rollback method. - Rollbacker interface { - Rollback(context.Context, *Tx) error - } - - // The RollbackFunc type is an adapter to allow the use of ordinary - // function as a Rollbacker. If f is a function with the appropriate - // signature, RollbackFunc(f) is a Rollbacker that calls f. - RollbackFunc func(context.Context, *Tx) error - - // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker - // and returns a Rollbacker. 
For example: - // - // hook := func(next ent.Rollbacker) ent.Rollbacker { - // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { - // // Do some stuff before. - // if err := next.Rollback(ctx, tx); err != nil { - // return err - // } - // // Do some stuff after. - // return nil - // }) - // } - // - RollbackHook func(Rollbacker) Rollbacker -) - -// Rollback calls f(ctx, m). -func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { - return f(ctx, tx) -} - -// Rollback rollbacks the transaction. -func (tx *Tx) Rollback() error { - txDriver := tx.config.driver.(*txDriver) - var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { - return txDriver.tx.Rollback() - }) - txDriver.mu.Lock() - hooks := append([]RollbackHook(nil), txDriver.onRollback...) - txDriver.mu.Unlock() - for i := len(hooks) - 1; i >= 0; i-- { - fn = hooks[i](fn) - } - return fn.Rollback(tx.ctx, tx) -} - -// OnRollback adds a hook to call on rollback. -func (tx *Tx) OnRollback(f RollbackHook) { - txDriver := tx.config.driver.(*txDriver) - txDriver.mu.Lock() - txDriver.onRollback = append(txDriver.onRollback, f) - txDriver.mu.Unlock() -} - -// Client returns a Client that binds to current transaction. -func (tx *Tx) Client() *Client { - tx.clientOnce.Do(func() { - tx.client = &Client{config: tx.config} - tx.client.init() - }) - return tx.client -} - -func (tx *Tx) init() { - tx.BackupJob = NewBackupJobClient(tx.config) - tx.BackupJobEvent = NewBackupJobEventClient(tx.config) - tx.BackupS3Config = NewBackupS3ConfigClient(tx.config) - tx.BackupSetting = NewBackupSettingClient(tx.config) - tx.BackupSourceConfig = NewBackupSourceConfigClient(tx.config) -} - -// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. -// The idea is to support transactions without adding any extra code to the builders. -// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. 
-// Commit and Rollback are nop for the internal builders and the user must call one -// of them in order to commit or rollback the transaction. -// -// If a closed transaction is embedded in one of the generated entities, and the entity -// applies a query, for example: BackupJob.QueryXXX(), the query will be executed -// through the driver which created this transaction. -// -// Note that txDriver is not goroutine safe. -type txDriver struct { - // the driver we started the transaction from. - drv dialect.Driver - // tx is the underlying transaction. - tx dialect.Tx - // completion hooks. - mu sync.Mutex - onCommit []CommitHook - onRollback []RollbackHook -} - -// newTx creates a new transactional driver. -func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { - tx, err := drv.Tx(ctx) - if err != nil { - return nil, err - } - return &txDriver{tx: tx, drv: drv}, nil -} - -// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls -// from the internal builders. Should be called only by the internal builders. -func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } - -// Dialect returns the dialect of the driver we started the transaction from. -func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } - -// Close is a nop close. -func (*txDriver) Close() error { return nil } - -// Commit is a nop commit for the internal builders. -// User must call `Tx.Commit` in order to commit the transaction. -func (*txDriver) Commit() error { return nil } - -// Rollback is a nop rollback for the internal builders. -// User must call `Tx.Rollback` in order to rollback the transaction. -func (*txDriver) Rollback() error { return nil } - -// Exec calls tx.Exec. -func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { - return tx.tx.Exec(ctx, query, args, v) -} - -// Query calls tx.Query. 
-func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { - return tx.tx.Query(ctx, query, args, v) -} - -var _ dialect.Driver = (*txDriver)(nil) diff --git a/backup/go.mod b/backup/go.mod deleted file mode 100644 index f29d205f7..000000000 --- a/backup/go.mod +++ /dev/null @@ -1,62 +0,0 @@ -module github.com/Wei-Shaw/sub2api/backup - -go 1.25.7 - -require ( - entgo.io/ent v0.14.5 - github.com/aws/aws-sdk-go-v2 v1.41.2 - github.com/aws/aws-sdk-go-v2/config v1.32.10 - github.com/aws/aws-sdk-go-v2/credentials v1.19.10 - github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.3 - github.com/aws/aws-sdk-go-v2/service/s3 v1.96.1 - github.com/stretchr/testify v1.8.4 - google.golang.org/grpc v1.75.1 - google.golang.org/protobuf v1.36.6 - modernc.org/sqlite v1.44.3 -) - -require ( - ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 // indirect - github.com/agext/levenshtein v1.2.3 // indirect - github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect - github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18 // indirect - github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 // indirect - github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.9 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18 // indirect - github.com/aws/aws-sdk-go-v2/service/signin v1.0.6 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.30.11 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.41.7 // indirect - 
github.com/aws/smithy-go v1.24.1 // indirect - github.com/bmatcuk/doublestar v1.3.4 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/dustin/go-humanize v1.0.1 // indirect - github.com/go-openapi/inflect v0.19.0 // indirect - github.com/google/go-cmp v0.7.0 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/hashicorp/hcl/v2 v2.18.1 // indirect - github.com/mattn/go-isatty v0.0.20 // indirect - github.com/mitchellh/go-wordwrap v1.0.1 // indirect - github.com/ncruces/go-strftime v1.0.0 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect - github.com/rogpeppe/go-internal v1.14.1 // indirect - github.com/zclconf/go-cty v1.14.4 // indirect - github.com/zclconf/go-cty-yaml v1.1.0 // indirect - golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 // indirect - golang.org/x/mod v0.29.0 // indirect - golang.org/x/net v0.46.0 // indirect - golang.org/x/sys v0.37.0 // indirect - golang.org/x/text v0.30.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect - modernc.org/libc v1.67.6 // indirect - modernc.org/mathutil v1.7.1 // indirect - modernc.org/memory v1.11.0 // indirect -) diff --git a/backup/go.sum b/backup/go.sum deleted file mode 100644 index 1b762a27d..000000000 --- a/backup/go.sum +++ /dev/null @@ -1,186 +0,0 @@ -ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9 h1:E0wvcUXTkgyN4wy4LGtNzMNGMytJN8afmIWXJVMi4cc= -ariga.io/atlas v0.32.1-0.20250325101103-175b25e1c1b9/go.mod h1:Oe1xWPuu5q9LzyrWfbZmEZxFYeu4BHTyzfjeW2aZp/w= -entgo.io/ent v0.14.5 h1:Rj2WOYJtCkWyFo6a+5wB3EfBRP0rnx1fMk6gGA0UUe4= -entgo.io/ent v0.14.5/go.mod h1:zTzLmWtPvGpmSwtkaayM2cm5m819NdM7z7tYPq3vN0U= -github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= -github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= 
-github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo= -github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= -github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= -github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/aws/aws-sdk-go-v2 v1.41.2 h1:LuT2rzqNQsauaGkPK/7813XxcZ3o3yePY0Iy891T2ls= -github.com/aws/aws-sdk-go-v2 v1.41.2/go.mod h1:IvvlAZQXvTXznUPfRVfryiG1fbzE2NGK6m9u39YQ+S4= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5 h1:zWFmPmgw4sveAYi1mRqG+E/g0461cJ5M4bJ8/nc6d3Q= -github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.5/go.mod h1:nVUlMLVV8ycXSb7mSkcNu9e3v/1TJq2RTlrPwhYWr5c= -github.com/aws/aws-sdk-go-v2/config v1.32.10 h1:9DMthfO6XWZYLfzZglAgW5Fyou2nRI5CuV44sTedKBI= -github.com/aws/aws-sdk-go-v2/config v1.32.10/go.mod h1:2rUIOnA2JaiqYmSKYmRJlcMWy6qTj1vuRFscppSBMcw= -github.com/aws/aws-sdk-go-v2/credentials v1.19.10 h1:EEhmEUFCE1Yhl7vDhNOI5OCL/iKMdkkYFTRpZXNw7m8= -github.com/aws/aws-sdk-go-v2/credentials v1.19.10/go.mod h1:RnnlFCAlxQCkN2Q379B67USkBMu1PipEEiibzYN5UTE= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18 h1:Ii4s+Sq3yDfaMLpjrJsqD6SmG/Wq/P5L/hw2qa78UAY= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.18/go.mod h1:6x81qnY++ovptLE6nWQeWrpXxbnlIex+4H4eYYGcqfc= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.3 h1:+mQ8NQBh7B7c2FBtppRnwkrmuwFON1XQQ+5yblomZKk= -github.com/aws/aws-sdk-go-v2/feature/s3/manager v1.22.3/go.mod h1:u67RKh3BRmS4FYLH+rN3N4T5fqpd9m2ttAwBJYEdosU= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18 h1:F43zk1vemYIqPAwhjTjYIz0irU2EY7sOb/F5eJ3HuyM= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.18/go.mod h1:w1jdlZXrGKaJcNoL+Nnrj+k5wlpGXqnNrKoP22HvAug= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18 h1:xCeWVjj0ki0l3nruoyP2slHsGArMxeiiaoPN5QZH6YQ= 
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.18/go.mod h1:r/eLGuGCBw6l36ZRWiw6PaZwPXb6YOj+i/7MizNl5/k= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4 h1:WKuaxf++XKWlHWu9ECbMlha8WOEGm0OUEZqm4K/Gcfk= -github.com/aws/aws-sdk-go-v2/internal/ini v1.8.4/go.mod h1:ZWy7j6v1vWGmPReu0iSGvRiise4YI5SkR3OHKTZ6Wuc= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18 h1:eZioDaZGJ0tMM4gzmkNIO2aAoQd+je7Ug7TkvAzlmkU= -github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.18/go.mod h1:CCXwUKAJdoWr6/NcxZ+zsiPr6oH/Q5aTooRGYieAyj4= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5 h1:CeY9LUdur+Dxoeldqoun6y4WtJ3RQtzk0JMP2gfUay0= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.5/go.mod h1:AZLZf2fMaahW5s/wMRciu1sYbdsikT/UHwbUjOdEVTc= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.9 h1:IJRzQTvdpjHRPItx9gzNcz7Y1F+xqAR+xiy9rr5ZYl8= -github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.9.9/go.mod h1:Kzm5e6OmNH8VMkgK9t+ry5jEih4Y8whqs+1hrkxim1I= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18 h1:LTRCYFlnnKFlKsyIQxKhJuDuA3ZkrDQMRYm6rXiHlLY= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.18/go.mod h1:XhwkgGG6bHSd00nO/mexWTcTjgd6PjuvWQMqSn2UaEk= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18 h1:/A/xDuZAVD2BpsS2fftFRo/NoEKQJ8YTnJDEHBy2Gtg= -github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.18/go.mod h1:hWe9b4f+djUQGmyiGEeOnZv69dtMSgpDRIvNMvuvzvY= -github.com/aws/aws-sdk-go-v2/service/s3 v1.96.1 h1:giB30dEeoar5bgDnkE0q+z7cFjcHaCjulpmPVmuKR84= -github.com/aws/aws-sdk-go-v2/service/s3 v1.96.1/go.mod h1:071TH4M3botFLWDbzQLfBR7tXYi7Fs2RsXSiH7nlUlY= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.6 h1:MzORe+J94I+hYu2a6XmV5yC9huoTv8NRcCrUNedDypQ= -github.com/aws/aws-sdk-go-v2/service/signin v1.0.6/go.mod h1:hXzcHLARD7GeWnifd8j9RWqtfIgxj4/cAtIVIK7hg8g= -github.com/aws/aws-sdk-go-v2/service/sso v1.30.11 h1:7oGD8KPfBOJGXiCoRKrrrQkbvCp8N++u36hrLMPey6o= 
-github.com/aws/aws-sdk-go-v2/service/sso v1.30.11/go.mod h1:0DO9B5EUJQlIDif+XJRWCljZRKsAFKh3gpFz7UnDtOo= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15 h1:edCcNp9eGIUDUCrzoCu1jWAXLGFIizeqkdkKgRlJwWc= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.35.15/go.mod h1:lyRQKED9xWfgkYC/wmmYfv7iVIM68Z5OQ88ZdcV1QbU= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.7 h1:NITQpgo9A5NrDZ57uOWj+abvXSb83BbyggcUBVksN7c= -github.com/aws/aws-sdk-go-v2/service/sts v1.41.7/go.mod h1:sks5UWBhEuWYDPdwlnRFn1w7xWdH29Jcpe+/PJQefEs= -github.com/aws/smithy-go v1.24.1 h1:VbyeNfmYkWoxMVpGUAbQumkODcYmfMRfZ8yQiH30SK0= -github.com/aws/smithy-go v1.24.1/go.mod h1:LEj2LM3rBRQJxPZTB4KuzZkaZYnZPnvgIhb4pu07mx0= -github.com/bmatcuk/doublestar v1.3.4 h1:gPypJ5xD31uhX6Tf54sDPUOBXTqKH4c9aPY66CyQrS0= -github.com/bmatcuk/doublestar v1.3.4/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE= -github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= -github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= -github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= -github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/go-openapi/inflect v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= -github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= -github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= -github.com/go-test/deep v1.0.3/go.mod 
h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= -github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= -github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= -github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= -github.com/hashicorp/hcl/v2 v2.18.1 h1:6nxnOJFku1EuSawSD81fuviYUV8DxFr3fp2dUi3ZYSo= -github.com/hashicorp/hcl/v2 v2.18.1/go.mod h1:ThLC89FV4p9MPW804KVbe/cEXoQ8NZEh+JtMeeGErHE= -github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= -github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= -github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= -github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= -github.com/mattn/go-isatty v0.0.20/go.mod 
h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= -github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-sqlite3 v1.14.17 h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= -github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/mitchellh/go-wordwrap v1.0.1 h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= -github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= -github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= -github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= -github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= -github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= -github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc= -github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8= -github.com/sergi/go-diff 
v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= -github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= -github.com/zclconf/go-cty-yaml v1.1.0 h1:nP+jp0qPHv2IhUVqmQSzjvqAWcObN0KBkUl2rWBdig0= -github.com/zclconf/go-cty-yaml v1.1.0/go.mod h1:9YLUH4g7lOhVWqUbctnVlZ5KLpg7JAprQNgxSZ1Gyxs= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= -go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= -go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= -go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= -go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= -go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= -go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= -go.opentelemetry.io/otel/trace v1.37.0/go.mod 
h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546 h1:mgKeJMpvi0yx/sU5GsxQ7p6s2wtOnGAHZWCHUM4KGzY= -golang.org/x/exp v0.0.0-20251023183803-a4bb9ffd2546/go.mod h1:j/pmGrbnkbPtQfxEe5D0VQhZC6qKbfKifgD0oM7sR70= -golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= -golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= -golang.org/x/net v0.46.0 h1:giFlY12I07fugqwPuWJi68oOnpfqFnJIJzaIIm2JVV4= -golang.org/x/net v0.46.0/go.mod h1:Q9BGdFy1y4nkUwiLvT5qtyhAnEHgnQ/zd8PfU6nc210= -golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= -golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= -golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= -golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= -golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= -golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= -golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= -golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= -gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= -gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7 h1:pFyd6EwwL2TqFf8emdthzeX+gZE1ElRq3iM8pui4KBY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250707201910-8d1bb00bc6a7/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A= -google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= -google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= -google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY= -google.golang.org/protobuf v1.36.6/go.mod 
h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -modernc.org/cc/v4 v4.27.1 h1:9W30zRlYrefrDV2JE2O8VDtJ1yPGownxciz5rrbQZis= -modernc.org/cc/v4 v4.27.1/go.mod h1:uVtb5OGqUKpoLWhqwNQo/8LwvoiEBLvZXIQ/SmO6mL0= -modernc.org/ccgo/v4 v4.30.1 h1:4r4U1J6Fhj98NKfSjnPUN7Ze2c6MnAdL0hWw6+LrJpc= -modernc.org/ccgo/v4 v4.30.1/go.mod h1:bIOeI1JL54Utlxn+LwrFyjCx2n2RDiYEaJVSrgdrRfM= -modernc.org/fileutil v1.3.40 h1:ZGMswMNc9JOCrcrakF1HrvmergNLAmxOPjizirpfqBA= -modernc.org/fileutil v1.3.40/go.mod h1:HxmghZSZVAz/LXcMNwZPA/DRrQZEVP9VX0V4LQGQFOc= -modernc.org/gc/v2 v2.6.5 h1:nyqdV8q46KvTpZlsw66kWqwXRHdjIlJOhG6kxiV/9xI= -modernc.org/gc/v2 v2.6.5/go.mod h1:YgIahr1ypgfe7chRuJi2gD7DBQiKSLMPgBQe9oIiito= -modernc.org/gc/v3 v3.1.1 h1:k8T3gkXWY9sEiytKhcgyiZ2L0DTyCQ/nvX+LoCljoRE= -modernc.org/gc/v3 v3.1.1/go.mod h1:HFK/6AGESC7Ex+EZJhJ2Gni6cTaYpSMmU/cT9RmlfYY= -modernc.org/goabi0 v0.2.0 h1:HvEowk7LxcPd0eq6mVOAEMai46V+i7Jrj13t4AzuNks= -modernc.org/goabi0 v0.2.0/go.mod h1:CEFRnnJhKvWT1c1JTI3Avm+tgOWbkOu5oPA8eH8LnMI= -modernc.org/libc v1.67.6 h1:eVOQvpModVLKOdT+LvBPjdQqfrZq+pC39BygcT+E7OI= -modernc.org/libc v1.67.6/go.mod h1:JAhxUVlolfYDErnwiqaLvUqc8nfb2r6S6slAgZOnaiE= -modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= -modernc.org/mathutil v1.7.1/go.mod h1:4p5IwJITfppl0G4sUEDtCr4DthTaT47/N3aT6MhfgJg= -modernc.org/memory v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= -modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= -modernc.org/opt v0.1.4 
h1:2kNGMRiUjrp4LcaPuLY2PzUfqM/w9N23quVwhKt5Qm8= -modernc.org/opt v0.1.4/go.mod h1:03fq9lsNfvkYSfxrfUhZCWPk1lm4cq4N+Bh//bEtgns= -modernc.org/sortutil v1.2.1 h1:+xyoGf15mM3NMlPDnFqrteY07klSFxLElE2PVuWIJ7w= -modernc.org/sortutil v1.2.1/go.mod h1:7ZI3a3REbai7gzCLcotuw9AC4VZVpYMjDzETGsSMqJE= -modernc.org/sqlite v1.44.3 h1:+39JvV/HWMcYslAwRxHb8067w+2zowvFOUrOWIy9PjY= -modernc.org/sqlite v1.44.3/go.mod h1:CzbrU2lSB1DKUusvwGz7rqEKIq+NUd8GWuBBZDs9/nA= -modernc.org/strutil v1.2.1 h1:UneZBkQA+DX2Rp35KcM69cSsNES9ly8mQWD71HKlOA0= -modernc.org/strutil v1.2.1/go.mod h1:EHkiggD70koQxjVdSBM3JKM7k6L0FbGE5eymy9i3B9A= -modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= -modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/backup/internal/artifact/doc.go b/backup/internal/artifact/doc.go deleted file mode 100644 index c146142de..000000000 --- a/backup/internal/artifact/doc.go +++ /dev/null @@ -1 +0,0 @@ -package artifact diff --git a/backup/internal/config/doc.go b/backup/internal/config/doc.go deleted file mode 100644 index d912156be..000000000 --- a/backup/internal/config/doc.go +++ /dev/null @@ -1 +0,0 @@ -package config diff --git a/backup/internal/executor/doc.go b/backup/internal/executor/doc.go deleted file mode 100644 index be0ba73e6..000000000 --- a/backup/internal/executor/doc.go +++ /dev/null @@ -1 +0,0 @@ -package executor diff --git a/backup/internal/executor/runner.go b/backup/internal/executor/runner.go deleted file mode 100644 index 18a977280..000000000 --- a/backup/internal/executor/runner.go +++ /dev/null @@ -1,788 +0,0 @@ -package executor - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "log" - "net" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "sync" - "time" - - "github.com/Wei-Shaw/sub2api/backup/ent" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - 
"github.com/Wei-Shaw/sub2api/backup/internal/s3client" - "github.com/Wei-Shaw/sub2api/backup/internal/store/entstore" -) - -const ( - defaultPollInterval = 5 * time.Second - defaultRunTimeout = 30 * time.Minute - defaultEventTimeout = 2 * time.Second - defaultRootDirectory = "/var/lib/sub2api/backups" -) - -type Options struct { - PollInterval time.Duration - RunTimeout time.Duration - Logger *log.Logger -} - -type Runner struct { - store *entstore.Store - pollInterval time.Duration - runTimeout time.Duration - logger *log.Logger - - notifyCh chan struct{} - stopCh chan struct{} - doneCh chan struct{} - - startOnce sync.Once - stopOnce sync.Once -} - -type runResult struct { - Artifact *entstore.BackupArtifactSnapshot - S3Object *entstore.BackupS3ObjectSnapshot - PartialErr error -} - -type generatedFile struct { - ArchiveName string `json:"archive_name"` - LocalPath string `json:"local_path"` - SizeBytes int64 `json:"size_bytes"` - SHA256 string `json:"sha256"` -} - -type bundleManifest struct { - JobID string `json:"job_id"` - BackupType string `json:"backup_type"` - SourceMode string `json:"source_mode"` - PostgresID string `json:"postgres_profile_id,omitempty"` - RedisID string `json:"redis_profile_id,omitempty"` - CreatedAt string `json:"created_at"` - Files []generatedFile `json:"files"` -} - -func NewRunner(store *entstore.Store, opts Options) *Runner { - poll := opts.PollInterval - if poll <= 0 { - poll = defaultPollInterval - } - runTimeout := opts.RunTimeout - if runTimeout <= 0 { - runTimeout = defaultRunTimeout - } - logger := opts.Logger - if logger == nil { - logger = log.New(os.Stdout, "[backupd-executor] ", log.LstdFlags) - } - - return &Runner{ - store: store, - pollInterval: poll, - runTimeout: runTimeout, - logger: logger, - notifyCh: make(chan struct{}, 1), - stopCh: make(chan struct{}), - doneCh: make(chan struct{}), - } -} - -func (r *Runner) Start() error { - if r == nil || r.store == nil { - return errors.New("executor store is required") - 
} - - var startErr error - r.startOnce.Do(func() { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - requeued, err := r.store.RequeueRunningJobs(ctx) - if err != nil { - startErr = err - return - } - if requeued > 0 { - r.logger.Printf("requeued %d running jobs after restart", requeued) - } - - go r.loop() - r.Notify() - }) - return startErr -} - -func (r *Runner) Stop(ctx context.Context) error { - if r == nil { - return nil - } - r.stopOnce.Do(func() { - close(r.stopCh) - }) - - if ctx == nil { - <-r.doneCh - return nil - } - - select { - case <-r.doneCh: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -func (r *Runner) Notify() { - if r == nil { - return - } - select { - case r.notifyCh <- struct{}{}: - default: - } -} - -func (r *Runner) loop() { - defer close(r.doneCh) - - ticker := time.NewTicker(r.pollInterval) - defer ticker.Stop() - - for { - select { - case <-r.notifyCh: - r.processQueuedJobs() - case <-ticker.C: - r.processQueuedJobs() - case <-r.stopCh: - return - } - } -} - -func (r *Runner) processQueuedJobs() { - for { - select { - case <-r.stopCh: - return - default: - } - - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) - job, err := r.store.AcquireNextQueuedJob(ctx) - cancel() - if err != nil { - if ent.IsNotFound(err) { - return - } - r.logger.Printf("acquire queued job failed: %v", err) - return - } - - r.executeJob(job) - } -} - -func (r *Runner) executeJob(job *ent.BackupJob) { - if job == nil { - return - } - - r.logEvent(job.JobID, "info", "worker", "job picked by executor", "") - - ctx, cancel := context.WithTimeout(context.Background(), r.runTimeout) - defer cancel() - - result, err := r.run(ctx, job) - finishInput := entstore.FinishBackupJobInput{ - JobID: job.JobID, - Status: backupjob.StatusFailed.String(), - } - - if err != nil { - r.logger.Printf("job %s failed: %v", job.JobID, err) - finishInput.ErrorMessage = shortenError(err) - } else { - 
finishInput.Artifact = result.Artifact - finishInput.S3Object = result.S3Object - switch { - case result.PartialErr != nil: - finishInput.Status = backupjob.StatusPartialSucceeded.String() - finishInput.ErrorMessage = shortenError(result.PartialErr) - default: - finishInput.Status = backupjob.StatusSucceeded.String() - } - } - - finishCtx, finishCancel := context.WithTimeout(context.Background(), 5*time.Second) - defer finishCancel() - if _, finishErr := r.store.FinishBackupJob(finishCtx, finishInput); finishErr != nil { - r.logger.Printf("job %s finish update failed: %v", job.JobID, finishErr) - } -} - -func (r *Runner) run(ctx context.Context, job *ent.BackupJob) (*runResult, error) { - cfg, err := r.store.GetConfig(ctx) - if err != nil { - return nil, fmt.Errorf("load config failed: %w", err) - } - backupType := strings.TrimSpace(job.BackupType.String()) - - effectiveConfig := *cfg - if backupType == backupjob.BackupTypePostgres.String() || backupType == backupjob.BackupTypeFull.String() { - postgresProfileID := strings.TrimSpace(job.PostgresProfileID) - if postgresProfileID != "" && postgresProfileID != strings.TrimSpace(cfg.ActivePostgresID) { - postgresProfile, profileErr := r.store.GetSourceProfile(ctx, "postgres", postgresProfileID) - if profileErr != nil { - return nil, fmt.Errorf("load postgres source profile failed: %w", profileErr) - } - effectiveConfig.Postgres = postgresProfile.Config - } - } - if backupType == backupjob.BackupTypeRedis.String() || backupType == backupjob.BackupTypeFull.String() { - redisProfileID := strings.TrimSpace(job.RedisProfileID) - if redisProfileID != "" && redisProfileID != strings.TrimSpace(cfg.ActiveRedisID) { - redisProfile, profileErr := r.store.GetSourceProfile(ctx, "redis", redisProfileID) - if profileErr != nil { - return nil, fmt.Errorf("load redis source profile failed: %w", profileErr) - } - effectiveConfig.Redis = redisProfile.Config - } - } - - uploadS3Config := cfg.S3 - profileID := 
strings.TrimSpace(job.S3ProfileID) - if profileID != "" && profileID != cfg.ActiveS3ProfileID { - profile, profileErr := r.store.GetS3Profile(ctx, profileID) - if profileErr != nil { - return nil, fmt.Errorf("load s3 profile failed: %w", profileErr) - } - uploadS3Config = profile.S3 - } - - backupRoot := normalizeBackupRoot(cfg.BackupRoot) - jobDir := filepath.Join( - backupRoot, - time.Now().UTC().Format("2006"), - time.Now().UTC().Format("01"), - time.Now().UTC().Format("02"), - job.JobID, - ) - if err := os.MkdirAll(jobDir, 0o750); err != nil { - return nil, fmt.Errorf("create backup directory failed: %w", err) - } - - generated := make([]generatedFile, 0, 4) - - if backupType == backupjob.BackupTypePostgres.String() || backupType == backupjob.BackupTypeFull.String() { - postgresPath := filepath.Join(jobDir, "postgres.dump") - if err := runPostgresBackup(ctx, &effectiveConfig, postgresPath); err != nil { - return nil, fmt.Errorf("postgres backup failed: %w", err) - } - gf, err := buildGeneratedFile("postgres.dump", postgresPath) - if err != nil { - return nil, err - } - generated = append(generated, gf) - r.logEvent(job.JobID, "info", "artifact", "postgres backup finished", "") - } - - if backupType == backupjob.BackupTypeRedis.String() || backupType == backupjob.BackupTypeFull.String() { - redisPath := filepath.Join(jobDir, "redis.rdb") - if err := runRedisBackup(ctx, &effectiveConfig, redisPath, job.JobID); err != nil { - return nil, fmt.Errorf("redis backup failed: %w", err) - } - gf, err := buildGeneratedFile("redis.rdb", redisPath) - if err != nil { - return nil, err - } - generated = append(generated, gf) - r.logEvent(job.JobID, "info", "artifact", "redis backup finished", "") - } - - manifest := bundleManifest{ - JobID: job.JobID, - BackupType: backupType, - SourceMode: strings.TrimSpace(effectiveConfig.SourceMode), - PostgresID: strings.TrimSpace(job.PostgresProfileID), - RedisID: strings.TrimSpace(job.RedisProfileID), - CreatedAt: 
time.Now().UTC().Format(time.RFC3339), - Files: generated, - } - manifestPath := filepath.Join(jobDir, "manifest.json") - if err := writeManifest(manifestPath, manifest); err != nil { - return nil, fmt.Errorf("write manifest failed: %w", err) - } - manifestGenerated, err := buildGeneratedFile("manifest.json", manifestPath) - if err != nil { - return nil, err - } - generated = append(generated, manifestGenerated) - - bundlePath := filepath.Join(jobDir, "bundle.tar.gz") - if err := writeBundle(bundlePath, generated); err != nil { - return nil, fmt.Errorf("build bundle failed: %w", err) - } - bundleSize, bundleSHA, err := fileDigest(bundlePath) - if err != nil { - return nil, fmt.Errorf("bundle hash failed: %w", err) - } - r.logEvent(job.JobID, "info", "artifact", "bundle generated", "") - - result := &runResult{ - Artifact: &entstore.BackupArtifactSnapshot{ - LocalPath: bundlePath, - SizeBytes: bundleSize, - SHA256: bundleSHA, - }, - } - - if job.UploadToS3 { - r.logEvent(job.JobID, "info", "s3", "start upload to s3", "") - s3Object, uploadErr := uploadToS3(ctx, uploadS3Config, job.JobID, bundlePath) - if uploadErr != nil { - result.PartialErr = fmt.Errorf("upload s3 failed: %w", uploadErr) - r.logEvent(job.JobID, "warning", "s3", "upload to s3 failed", shortenError(uploadErr)) - } else { - result.S3Object = s3Object - r.logEvent(job.JobID, "info", "s3", "upload to s3 finished", "") - } - } - - if err := applyRetentionPolicy(ctx, r.store, &effectiveConfig); err != nil { - r.logger.Printf("retention cleanup failed: %v", err) - } - - return result, nil -} - -func runPostgresBackup(ctx context.Context, cfg *entstore.ConfigSnapshot, destination string) error { - if cfg == nil { - return errors.New("config is nil") - } - mode := normalizeSourceMode(cfg.SourceMode) - pg := cfg.Postgres - host := defaultIfBlank(pg.Host, "127.0.0.1") - port := pg.Port - if port <= 0 { - port = 5432 - } - user := defaultIfBlank(pg.User, "postgres") - database := strings.TrimSpace(pg.Database) 
- if database == "" { - return errors.New("postgres.database is required") - } - - baseArgs := []string{ - "-h", host, - "-p", strconv.Itoa(int(port)), - "-U", user, - "-d", database, - "--format=custom", - "--no-owner", - "--no-privileges", - } - if strings.TrimSpace(pg.SSLMode) != "" { - baseArgs = append(baseArgs, "--sslmode", strings.TrimSpace(pg.SSLMode)) - } - - switch mode { - case "direct": - args := append([]string{}, baseArgs...) - args = append(args, "--file", destination) - env := []string{} - if strings.TrimSpace(pg.Password) != "" { - env = append(env, "PGPASSWORD="+strings.TrimSpace(pg.Password)) - } - return runCommand(ctx, "pg_dump", args, env, nil) - case "docker_exec": - container := strings.TrimSpace(pg.ContainerName) - if container == "" { - return errors.New("postgres.container_name is required in docker_exec mode") - } - outputFile, err := os.OpenFile(destination, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o640) - if err != nil { - return err - } - defer func() { - _ = outputFile.Close() - }() - - args := []string{"exec"} - if strings.TrimSpace(pg.Password) != "" { - args = append(args, "-e", "PGPASSWORD="+strings.TrimSpace(pg.Password)) - } - args = append(args, container, "pg_dump") - args = append(args, baseArgs...) 
- return runCommand(ctx, "docker", args, nil, outputFile) - default: - return fmt.Errorf("unsupported source_mode: %s", mode) - } -} - -func runRedisBackup(ctx context.Context, cfg *entstore.ConfigSnapshot, destination, jobID string) error { - if cfg == nil { - return errors.New("config is nil") - } - mode := normalizeSourceMode(cfg.SourceMode) - redisCfg := cfg.Redis - - host, port := parseRedisAddr(redisCfg.Addr) - baseArgs := []string{} - if host != "" { - baseArgs = append(baseArgs, "-h", host) - } - if port > 0 { - baseArgs = append(baseArgs, "-p", strconv.Itoa(port)) - } - if strings.TrimSpace(redisCfg.Username) != "" { - baseArgs = append(baseArgs, "--user", strings.TrimSpace(redisCfg.Username)) - } - if redisCfg.DB >= 0 { - baseArgs = append(baseArgs, "-n", strconv.Itoa(int(redisCfg.DB))) - } - - env := []string{} - if strings.TrimSpace(redisCfg.Password) != "" { - env = append(env, "REDISCLI_AUTH="+strings.TrimSpace(redisCfg.Password)) - } - - switch mode { - case "direct": - args := append([]string{}, baseArgs...) - args = append(args, "--rdb", destination) - return runCommand(ctx, "redis-cli", args, env, nil) - case "docker_exec": - container := strings.TrimSpace(redisCfg.ContainerName) - if container == "" { - return errors.New("redis.container_name is required in docker_exec mode") - } - tmpPath := fmt.Sprintf("/tmp/sub2api_%s.rdb", sanitizeFileName(jobID)) - - execArgs := []string{"exec"} - for _, item := range env { - execArgs = append(execArgs, "-e", item) - } - execArgs = append(execArgs, container, "redis-cli") - execArgs = append(execArgs, baseArgs...) 
- execArgs = append(execArgs, "--rdb", tmpPath) - if err := runCommand(ctx, "docker", execArgs, nil, nil); err != nil { - return err - } - - copyArgs := []string{"cp", container + ":" + tmpPath, destination} - if err := runCommand(ctx, "docker", copyArgs, nil, nil); err != nil { - _ = runCommand(ctx, "docker", []string{"exec", container, "rm", "-f", tmpPath}, nil, nil) - return err - } - _ = runCommand(ctx, "docker", []string{"exec", container, "rm", "-f", tmpPath}, nil, nil) - return nil - default: - return fmt.Errorf("unsupported source_mode: %s", mode) - } -} - -func uploadToS3(ctx context.Context, s3Cfg entstore.S3Config, jobID, bundlePath string) (*entstore.BackupS3ObjectSnapshot, error) { - if !s3Cfg.Enabled { - return nil, errors.New("s3 is disabled") - } - if strings.TrimSpace(s3Cfg.Bucket) == "" { - return nil, errors.New("s3.bucket is required") - } - if strings.TrimSpace(s3Cfg.Region) == "" { - return nil, errors.New("s3.region is required") - } - - client, err := s3client.New(ctx, s3client.Config{ - Endpoint: strings.TrimSpace(s3Cfg.Endpoint), - Region: strings.TrimSpace(s3Cfg.Region), - AccessKeyID: strings.TrimSpace(s3Cfg.AccessKeyID), - SecretAccessKey: strings.TrimSpace(s3Cfg.SecretAccessKey), - Bucket: strings.TrimSpace(s3Cfg.Bucket), - Prefix: strings.Trim(strings.TrimSpace(s3Cfg.Prefix), "/"), - ForcePathStyle: s3Cfg.ForcePathStyle, - UseSSL: s3Cfg.UseSSL, - }) - if err != nil { - return nil, err - } - - now := time.Now().UTC() - key := joinS3Key( - client.Prefix(), - now.Format("2006"), - now.Format("01"), - now.Format("02"), - jobID, - filepath.Base(bundlePath), - ) - etag, err := client.UploadFile(ctx, bundlePath, key) - if err != nil { - return nil, err - } - return &entstore.BackupS3ObjectSnapshot{ - Bucket: client.Bucket(), - Key: key, - ETag: etag, - }, nil -} - -func applyRetentionPolicy(ctx context.Context, store *entstore.Store, cfg *entstore.ConfigSnapshot) error { - if store == nil || cfg == nil { - return nil - } - keepLast := 
int(cfg.KeepLast) - retentionDays := int(cfg.RetentionDays) - if keepLast <= 0 && retentionDays <= 0 { - return nil - } - - items, err := store.ListFinishedJobsForRetention(ctx) - if err != nil { - return err - } - if len(items) == 0 { - return nil - } - - threshold := time.Now().AddDate(0, 0, -retentionDays) - for idx, item := range items { - if item == nil { - continue - } - keepByCount := keepLast > 0 && idx < keepLast - keepByTime := false - if retentionDays > 0 { - reference := item.CreatedAt - if item.FinishedAt != nil { - reference = *item.FinishedAt - } - keepByTime = reference.After(threshold) - } - if keepByCount || keepByTime { - continue - } - - artifactPath := strings.TrimSpace(item.ArtifactLocalPath) - if artifactPath == "" { - continue - } - if err := os.RemoveAll(filepath.Dir(artifactPath)); err != nil && !errors.Is(err, os.ErrNotExist) { - return err - } - } - return nil -} - -func buildGeneratedFile(archiveName, path string) (generatedFile, error) { - size, sum, err := fileDigest(path) - if err != nil { - return generatedFile{}, err - } - return generatedFile{ - ArchiveName: archiveName, - LocalPath: path, - SizeBytes: size, - SHA256: sum, - }, nil -} - -func writeManifest(path string, manifest bundleManifest) error { - data, err := json.MarshalIndent(manifest, "", " ") - if err != nil { - return err - } - data = append(data, '\n') - return os.WriteFile(path, data, 0o640) -} - -func writeBundle(path string, files []generatedFile) error { - output, err := os.OpenFile(path, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, 0o640) - if err != nil { - return err - } - defer func() { - _ = output.Close() - }() - - gzipWriter := gzip.NewWriter(output) - defer func() { - _ = gzipWriter.Close() - }() - - tarWriter := tar.NewWriter(gzipWriter) - defer func() { - _ = tarWriter.Close() - }() - - for _, file := range files { - info, err := os.Stat(file.LocalPath) - if err != nil { - return err - } - if !info.Mode().IsRegular() { - continue - } - - header, err := 
tar.FileInfoHeader(info, "") - if err != nil { - return err - } - header.Name = file.ArchiveName - if err := tarWriter.WriteHeader(header); err != nil { - return err - } - - reader, err := os.Open(file.LocalPath) - if err != nil { - return err - } - if _, err = io.Copy(tarWriter, reader); err != nil { - _ = reader.Close() - return err - } - _ = reader.Close() - } - return nil -} - -func fileDigest(path string) (int64, string, error) { - file, err := os.Open(path) - if err != nil { - return 0, "", err - } - defer func() { - _ = file.Close() - }() - - hash := sha256.New() - size, err := io.Copy(hash, file) - if err != nil { - return 0, "", err - } - return size, hex.EncodeToString(hash.Sum(nil)), nil -} - -func runCommand(ctx context.Context, name string, args []string, extraEnv []string, stdout io.Writer) error { - cmd := exec.CommandContext(ctx, name, args...) - cmd.Env = append(os.Environ(), extraEnv...) - if stdout != nil { - cmd.Stdout = stdout - } - var stderr bytes.Buffer - cmd.Stderr = &stderr - - if err := cmd.Run(); err != nil { - errMsg := strings.TrimSpace(stderr.String()) - if errMsg == "" { - errMsg = err.Error() - } - return fmt.Errorf("%s command failed: %s", name, sanitizeError(errMsg)) - } - return nil -} - -func (r *Runner) logEvent(jobID, level, eventType, message, payload string) { - ctx, cancel := context.WithTimeout(context.Background(), defaultEventTimeout) - defer cancel() - if err := r.store.AppendJobEvent(ctx, jobID, level, eventType, message, payload); err != nil { - r.logger.Printf("append event failed, job=%s event=%s err=%v", jobID, eventType, err) - } -} - -func normalizeSourceMode(v string) string { - mode := strings.TrimSpace(v) - if mode == "" { - return "direct" - } - return mode -} - -func normalizeBackupRoot(root string) string { - trimmed := strings.TrimSpace(root) - if trimmed == "" { - return defaultRootDirectory - } - return trimmed -} - -func parseRedisAddr(addr string) (string, int) { - trimmed := strings.TrimSpace(addr) - 
if trimmed == "" { - return "127.0.0.1", 6379 - } - - host, portText, err := net.SplitHostPort(trimmed) - if err != nil { - return trimmed, 6379 - } - port, err := strconv.Atoi(portText) - if err != nil || port <= 0 { - return host, 6379 - } - return host, port -} - -func joinS3Key(parts ...string) string { - filtered := make([]string, 0, len(parts)) - for _, part := range parts { - p := strings.Trim(strings.TrimSpace(part), "/") - if p == "" { - continue - } - filtered = append(filtered, p) - } - return strings.Join(filtered, "/") -} - -func sanitizeFileName(v string) string { - trimmed := strings.TrimSpace(v) - if trimmed == "" { - return "job" - } - replacer := strings.NewReplacer("/", "_", "\\", "_", "..", "_", " ", "_") - return replacer.Replace(trimmed) -} - -func sanitizeError(v string) string { - out := strings.TrimSpace(v) - out = strings.ReplaceAll(out, "\n", " ") - out = strings.ReplaceAll(out, "\r", " ") - out = strings.TrimSpace(out) - if out == "" { - return "unknown error" - } - if len(out) > 512 { - return out[:512] - } - return out -} - -func shortenError(err error) string { - if err == nil { - return "" - } - return sanitizeError(err.Error()) -} - -func defaultIfBlank(v, fallback string) string { - if strings.TrimSpace(v) == "" { - return fallback - } - return strings.TrimSpace(v) -} diff --git a/backup/internal/executor/runner_test.go b/backup/internal/executor/runner_test.go deleted file mode 100644 index 349253fcc..000000000 --- a/backup/internal/executor/runner_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package executor - -import ( - "archive/tar" - "compress/gzip" - "io" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestParseRedisAddr(t *testing.T) { - t.Parallel() - - host, port := parseRedisAddr("127.0.0.1:6380") - require.Equal(t, "127.0.0.1", host) - require.Equal(t, 6380, port) - - host, port = parseRedisAddr("localhost") - require.Equal(t, "localhost", host) - require.Equal(t, 6379, 
port) - - host, port = parseRedisAddr("") - require.Equal(t, "127.0.0.1", host) - require.Equal(t, 6379, port) -} - -func TestJoinS3Key(t *testing.T) { - t.Parallel() - - require.Equal(t, "a/b/c", joinS3Key("/a/", "/b", "c/")) - require.Equal(t, "a/c", joinS3Key("a", "", "c")) - require.Equal(t, "", joinS3Key("", " ", "/")) -} - -func TestSanitizeError(t *testing.T) { - t.Parallel() - - msg := sanitizeError("line1\nline2\rline3") - require.Equal(t, "line1 line2 line3", msg) - - longMsg := sanitizeError(strings.Repeat("x", 600)) - require.Len(t, longMsg, 512) -} - -func TestWriteManifestAndBundle(t *testing.T) { - t.Parallel() - - workDir := t.TempDir() - fileAPath := filepath.Join(workDir, "postgres.dump") - fileBPath := filepath.Join(workDir, "redis.rdb") - require.NoError(t, os.WriteFile(fileAPath, []byte("postgres-data"), 0o640)) - require.NoError(t, os.WriteFile(fileBPath, []byte("redis-data"), 0o640)) - - fileA, err := buildGeneratedFile("postgres.dump", fileAPath) - require.NoError(t, err) - fileB, err := buildGeneratedFile("redis.rdb", fileBPath) - require.NoError(t, err) - - manifestPath := filepath.Join(workDir, "manifest.json") - require.NoError(t, writeManifest(manifestPath, bundleManifest{ - JobID: "bk_demo", - BackupType: "full", - SourceMode: "direct", - CreatedAt: "2026-01-01T00:00:00Z", - Files: []generatedFile{fileA, fileB}, - })) - manifestFile, err := buildGeneratedFile("manifest.json", manifestPath) - require.NoError(t, err) - - bundlePath := filepath.Join(workDir, "bundle.tar.gz") - require.NoError(t, writeBundle(bundlePath, []generatedFile{fileA, fileB, manifestFile})) - - entries, err := readTarEntries(bundlePath) - require.NoError(t, err) - require.Contains(t, entries, "postgres.dump") - require.Contains(t, entries, "redis.rdb") - require.Contains(t, entries, "manifest.json") -} - -func readTarEntries(bundlePath string) ([]string, error) { - file, err := os.Open(bundlePath) - if err != nil { - return nil, err - } - defer func() { _ = 
file.Close() }() - - gzReader, err := gzip.NewReader(file) - if err != nil { - return nil, err - } - defer func() { _ = gzReader.Close() }() - - tarReader := tar.NewReader(gzReader) - entries := make([]string, 0, 8) - for { - header, err := tarReader.Next() - if err == io.EOF { - break - } - if err != nil { - return nil, err - } - entries = append(entries, header.Name) - } - return entries, nil -} diff --git a/backup/internal/grpcserver/doc.go b/backup/internal/grpcserver/doc.go deleted file mode 100644 index 22ab05063..000000000 --- a/backup/internal/grpcserver/doc.go +++ /dev/null @@ -1 +0,0 @@ -package grpcserver diff --git a/backup/internal/grpcserver/interceptor.go b/backup/internal/grpcserver/interceptor.go deleted file mode 100644 index c645746b2..000000000 --- a/backup/internal/grpcserver/interceptor.go +++ /dev/null @@ -1,131 +0,0 @@ -package grpcserver - -import ( - "context" - "fmt" - "log" - "strings" - "time" - - "google.golang.org/grpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -var defaultMethodTimeouts = map[string]time.Duration{ - "/backup.v1.BackupService/Health": 1 * time.Second, - "/backup.v1.BackupService/GetConfig": 2 * time.Second, - "/backup.v1.BackupService/ListBackupJobs": 2 * time.Second, - "/backup.v1.BackupService/GetBackupJob": 2 * time.Second, - "/backup.v1.BackupService/CreateBackupJob": 3 * time.Second, - "/backup.v1.BackupService/UpdateConfig": 5 * time.Second, - "/backup.v1.BackupService/ValidateS3": 5 * time.Second, -} - -func UnaryServerInterceptor(logger *log.Logger) grpc.UnaryServerInterceptor { - if logger == nil { - logger = log.Default() - } - - return func(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) { - method := "" - if info != nil { - method = info.FullMethod - } - requestID := incomingRequestID(ctx) - - if requestID != "" { - _ = grpc.SetHeader(ctx, metadata.Pairs("x-request-id", requestID)) - 
} - - callCtx, cancel := applyMethodTimeout(ctx, method) - defer cancel() - - start := time.Now() - defer func() { - if recovered := recover(); recovered != nil { - err = status.Error(codes.Internal, "panic recovered") - logger.Printf( - "[backupd-grpc] request_id=%s method=%s code=%s duration_ms=%d panic=%q", - requestID, - method, - codes.Internal.String(), - time.Since(start).Milliseconds(), - sanitizeLogValue(fmt.Sprint(recovered)), - ) - return - } - - err = normalizeGRPCError(err) - logger.Printf( - "[backupd-grpc] request_id=%s method=%s code=%s duration_ms=%d err=%q", - requestID, - method, - status.Code(err).String(), - time.Since(start).Milliseconds(), - sanitizeLogValue(status.Convert(err).Message()), - ) - }() - - resp, err = handler(callCtx, req) - return resp, err - } -} - -func applyMethodTimeout(ctx context.Context, method string) (context.Context, context.CancelFunc) { - timeout, ok := defaultMethodTimeouts[method] - if !ok || timeout <= 0 { - return context.WithCancel(ctx) - } - - if deadline, hasDeadline := ctx.Deadline(); hasDeadline { - if remaining := time.Until(deadline); remaining > 0 && remaining <= timeout { - return context.WithCancel(ctx) - } - } - return context.WithTimeout(ctx, timeout) -} - -func incomingRequestID(ctx context.Context) string { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - return "" - } - - for _, key := range []string{"x-request-id", "request-id", "x_request_id"} { - values := md.Get(key) - if len(values) == 0 { - continue - } - value := strings.TrimSpace(values[0]) - if value != "" { - return value - } - } - return "" -} - -func normalizeGRPCError(err error) error { - if err == nil { - return nil - } - - if _, ok := status.FromError(err); ok { - return err - } - return status.Error(codes.Internal, sanitizeLogValue(err.Error())) -} - -func sanitizeLogValue(value string) string { - normalized := strings.TrimSpace(value) - normalized = strings.ReplaceAll(normalized, "\n", " ") - normalized = 
strings.ReplaceAll(normalized, "\r", " ") - if normalized == "" { - return "-" - } - if len(normalized) > 512 { - return normalized[:512] - } - return normalized -} diff --git a/backup/internal/grpcserver/interceptor_test.go b/backup/internal/grpcserver/interceptor_test.go deleted file mode 100644 index cdcaa4604..000000000 --- a/backup/internal/grpcserver/interceptor_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package grpcserver - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/stretchr/testify/require" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" - "google.golang.org/grpc/status" -) - -func TestIncomingRequestID(t *testing.T) { - t.Parallel() - - ctx := metadata.NewIncomingContext(context.Background(), metadata.Pairs("x-request-id", "req-123")) - require.Equal(t, "req-123", incomingRequestID(ctx)) -} - -func TestNormalizeGRPCError(t *testing.T) { - t.Parallel() - - grpcErr := status.Error(codes.InvalidArgument, "bad") - require.Equal(t, grpcErr, normalizeGRPCError(grpcErr)) - - plain := normalizeGRPCError(errors.New("plain error")) - require.Equal(t, codes.Internal, status.Code(plain)) - require.Contains(t, status.Convert(plain).Message(), "plain error") -} - -func TestApplyMethodTimeout(t *testing.T) { - t.Parallel() - - ctx := context.Background() - callCtx, cancel := applyMethodTimeout(ctx, "/backup.v1.BackupService/Health") - defer cancel() - deadline, ok := callCtx.Deadline() - require.True(t, ok) - require.WithinDuration(t, time.Now().Add(1*time.Second), deadline, 200*time.Millisecond) - - shortCtx, shortCancel := context.WithTimeout(context.Background(), 200*time.Millisecond) - defer shortCancel() - callCtx2, cancel2 := applyMethodTimeout(shortCtx, "/backup.v1.BackupService/UpdateConfig") - defer cancel2() - deadline2, ok2 := callCtx2.Deadline() - require.True(t, ok2) - require.WithinDuration(t, time.Now().Add(200*time.Millisecond), deadline2, 200*time.Millisecond) -} diff --git 
a/backup/internal/grpcserver/server.go b/backup/internal/grpcserver/server.go deleted file mode 100644 index 6e453b381..000000000 --- a/backup/internal/grpcserver/server.go +++ /dev/null @@ -1,697 +0,0 @@ -package grpcserver - -import ( - "context" - "errors" - "strings" - "time" - - "github.com/Wei-Shaw/sub2api/backup/ent" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/internal/s3client" - "github.com/Wei-Shaw/sub2api/backup/internal/store/entstore" - backupv1 "github.com/Wei-Shaw/sub2api/backup/proto/backup/v1" - "github.com/aws/aws-sdk-go-v2/aws" - "github.com/aws/aws-sdk-go-v2/service/s3" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -type Server struct { - backupv1.UnimplementedBackupServiceServer - store *entstore.Store - startedAt time.Time - version string - notifier queueNotifier -} - -type queueNotifier interface { - Notify() -} - -func New(store *entstore.Store, version string, notifier queueNotifier) *Server { - if strings.TrimSpace(version) == "" { - version = "dev" - } - return &Server{ - store: store, - startedAt: time.Now(), - version: version, - notifier: notifier, - } -} - -func (s *Server) Health(_ context.Context, _ *backupv1.HealthRequest) (*backupv1.HealthResponse, error) { - return &backupv1.HealthResponse{ - Status: "SERVING", - Version: s.version, - UptimeSeconds: int64(time.Since(s.startedAt).Seconds()), - }, nil -} - -func (s *Server) GetConfig(ctx context.Context, _ *backupv1.GetConfigRequest) (*backupv1.GetConfigResponse, error) { - cfg, err := s.store.GetConfig(ctx) - if err != nil { - return nil, status.Errorf(codes.Internal, "load config failed: %v", err) - } - return &backupv1.GetConfigResponse{Config: toProtoConfig(cfg)}, nil -} - -func (s *Server) UpdateConfig(ctx context.Context, req *backupv1.UpdateConfigRequest) (*backupv1.UpdateConfigResponse, error) { - if req == nil || req.GetConfig() == nil { - return nil, status.Error(codes.InvalidArgument, "config is 
required") - } - cfg := req.GetConfig() - if err := validateConfig(cfg); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - updated, err := s.store.UpdateConfig(ctx, fromProtoConfig(cfg)) - if err != nil { - return nil, status.Errorf(codes.Internal, "update config failed: %v", err) - } - return &backupv1.UpdateConfigResponse{Config: toProtoConfig(updated)}, nil -} - -func (s *Server) ListSourceProfiles(ctx context.Context, req *backupv1.ListSourceProfilesRequest) (*backupv1.ListSourceProfilesResponse, error) { - sourceType := "" - if req != nil { - sourceType = strings.TrimSpace(req.GetSourceType()) - } - if sourceType == "" { - return nil, status.Error(codes.InvalidArgument, "source_type is required") - } - - items, err := s.store.ListSourceProfiles(ctx, sourceType) - if err != nil { - if mapped := mapSourceProfileStoreError(err); mapped != nil { - return nil, mapped - } - return nil, status.Errorf(codes.Internal, "list source profiles failed: %v", err) - } - - out := make([]*backupv1.SourceProfile, 0, len(items)) - for _, item := range items { - out = append(out, toProtoSourceProfile(item)) - } - return &backupv1.ListSourceProfilesResponse{Items: out}, nil -} - -func (s *Server) CreateSourceProfile(ctx context.Context, req *backupv1.CreateSourceProfileRequest) (*backupv1.CreateSourceProfileResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "request is required") - } - if err := validateSourceProfileRequest(req.GetSourceType(), req.GetProfileId(), req.GetName(), req.GetConfig()); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - profile, err := s.store.CreateSourceProfile(ctx, entstore.CreateSourceProfileInput{ - SourceType: strings.TrimSpace(req.GetSourceType()), - ProfileID: strings.TrimSpace(req.GetProfileId()), - Name: strings.TrimSpace(req.GetName()), - Config: fromProtoSourceConfig(req.GetConfig()), - SetActive: req.GetSetActive(), - }) - if err != nil { - 
if mapped := mapSourceProfileStoreError(err); mapped != nil { - return nil, mapped - } - return nil, status.Errorf(codes.Internal, "create source profile failed: %v", err) - } - return &backupv1.CreateSourceProfileResponse{Profile: toProtoSourceProfile(profile)}, nil -} - -func (s *Server) UpdateSourceProfile(ctx context.Context, req *backupv1.UpdateSourceProfileRequest) (*backupv1.UpdateSourceProfileResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "request is required") - } - if err := validateSourceProfileRequest(req.GetSourceType(), req.GetProfileId(), req.GetName(), req.GetConfig()); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - profile, err := s.store.UpdateSourceProfile(ctx, entstore.UpdateSourceProfileInput{ - SourceType: strings.TrimSpace(req.GetSourceType()), - ProfileID: strings.TrimSpace(req.GetProfileId()), - Name: strings.TrimSpace(req.GetName()), - Config: fromProtoSourceConfig(req.GetConfig()), - }) - if err != nil { - if mapped := mapSourceProfileStoreError(err); mapped != nil { - return nil, mapped - } - return nil, status.Errorf(codes.Internal, "update source profile failed: %v", err) - } - return &backupv1.UpdateSourceProfileResponse{Profile: toProtoSourceProfile(profile)}, nil -} - -func (s *Server) DeleteSourceProfile(ctx context.Context, req *backupv1.DeleteSourceProfileRequest) (*backupv1.DeleteSourceProfileResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "request is required") - } - if strings.TrimSpace(req.GetSourceType()) == "" { - return nil, status.Error(codes.InvalidArgument, "source_type is required") - } - if strings.TrimSpace(req.GetProfileId()) == "" { - return nil, status.Error(codes.InvalidArgument, "profile_id is required") - } - - if err := s.store.DeleteSourceProfile(ctx, req.GetSourceType(), req.GetProfileId()); err != nil { - if mapped := mapSourceProfileStoreError(err); mapped != nil { - return nil, mapped - } - 
return nil, status.Errorf(codes.Internal, "delete source profile failed: %v", err) - } - return &backupv1.DeleteSourceProfileResponse{}, nil -} - -func (s *Server) SetActiveSourceProfile(ctx context.Context, req *backupv1.SetActiveSourceProfileRequest) (*backupv1.SetActiveSourceProfileResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "request is required") - } - if strings.TrimSpace(req.GetSourceType()) == "" { - return nil, status.Error(codes.InvalidArgument, "source_type is required") - } - if strings.TrimSpace(req.GetProfileId()) == "" { - return nil, status.Error(codes.InvalidArgument, "profile_id is required") - } - - profile, err := s.store.SetActiveSourceProfile(ctx, req.GetSourceType(), req.GetProfileId()) - if err != nil { - if mapped := mapSourceProfileStoreError(err); mapped != nil { - return nil, mapped - } - return nil, status.Errorf(codes.Internal, "set active source profile failed: %v", err) - } - return &backupv1.SetActiveSourceProfileResponse{Profile: toProtoSourceProfile(profile)}, nil -} - -func (s *Server) ValidateS3(ctx context.Context, req *backupv1.ValidateS3Request) (*backupv1.ValidateS3Response, error) { - if req == nil || req.GetS3() == nil { - return nil, status.Error(codes.InvalidArgument, "s3 config is required") - } - s3Cfg := req.GetS3() - if strings.TrimSpace(s3Cfg.GetBucket()) == "" { - return nil, status.Error(codes.InvalidArgument, "s3.bucket is required") - } - if strings.TrimSpace(s3Cfg.GetRegion()) == "" { - return nil, status.Error(codes.InvalidArgument, "s3.region is required") - } - - client, err := s3client.New(ctx, s3client.Config{ - Endpoint: strings.TrimSpace(s3Cfg.GetEndpoint()), - Region: strings.TrimSpace(s3Cfg.GetRegion()), - AccessKeyID: strings.TrimSpace(s3Cfg.GetAccessKeyId()), - SecretAccessKey: strings.TrimSpace(s3Cfg.GetSecretAccessKey()), - Bucket: strings.TrimSpace(s3Cfg.GetBucket()), - Prefix: strings.Trim(strings.TrimSpace(s3Cfg.GetPrefix()), "/"), - ForcePathStyle: 
s3Cfg.GetForcePathStyle(), - UseSSL: s3Cfg.GetUseSsl(), - }) - if err != nil { - return &backupv1.ValidateS3Response{Ok: false, Message: err.Error()}, nil - } - - _, err = client.Raw().HeadBucket(ctx, &s3.HeadBucketInput{Bucket: aws.String(strings.TrimSpace(s3Cfg.GetBucket()))}) - if err != nil { - return &backupv1.ValidateS3Response{Ok: false, Message: err.Error()}, nil - } - return &backupv1.ValidateS3Response{Ok: true, Message: "ok"}, nil -} - -func (s *Server) ListS3Profiles(ctx context.Context, _ *backupv1.ListS3ProfilesRequest) (*backupv1.ListS3ProfilesResponse, error) { - items, err := s.store.ListS3Profiles(ctx) - if err != nil { - return nil, status.Errorf(codes.Internal, "list s3 profiles failed: %v", err) - } - - out := make([]*backupv1.S3Profile, 0, len(items)) - for _, item := range items { - out = append(out, toProtoS3Profile(item)) - } - return &backupv1.ListS3ProfilesResponse{Items: out}, nil -} - -func (s *Server) CreateS3Profile(ctx context.Context, req *backupv1.CreateS3ProfileRequest) (*backupv1.CreateS3ProfileResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "request is required") - } - if err := validateS3ProfileRequest(req.GetProfileId(), req.GetName(), req.GetS3()); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - profile, err := s.store.CreateS3Profile(ctx, entstore.CreateS3ProfileInput{ - ProfileID: strings.TrimSpace(req.GetProfileId()), - Name: strings.TrimSpace(req.GetName()), - S3: fromProtoS3Config(req.GetS3()), - SetActive: req.GetSetActive(), - }) - if err != nil { - if mapped := mapS3ProfileStoreError(err); mapped != nil { - return nil, mapped - } - return nil, status.Errorf(codes.Internal, "create s3 profile failed: %v", err) - } - return &backupv1.CreateS3ProfileResponse{Profile: toProtoS3Profile(profile)}, nil -} - -func (s *Server) UpdateS3Profile(ctx context.Context, req *backupv1.UpdateS3ProfileRequest) (*backupv1.UpdateS3ProfileResponse, error) { - if req 
== nil { - return nil, status.Error(codes.InvalidArgument, "request is required") - } - if err := validateS3ProfileRequest(req.GetProfileId(), req.GetName(), req.GetS3()); err != nil { - return nil, status.Error(codes.InvalidArgument, err.Error()) - } - - profile, err := s.store.UpdateS3Profile(ctx, entstore.UpdateS3ProfileInput{ - ProfileID: strings.TrimSpace(req.GetProfileId()), - Name: strings.TrimSpace(req.GetName()), - S3: fromProtoS3Config(req.GetS3()), - }) - if err != nil { - if mapped := mapS3ProfileStoreError(err); mapped != nil { - return nil, mapped - } - return nil, status.Errorf(codes.Internal, "update s3 profile failed: %v", err) - } - return &backupv1.UpdateS3ProfileResponse{Profile: toProtoS3Profile(profile)}, nil -} - -func (s *Server) DeleteS3Profile(ctx context.Context, req *backupv1.DeleteS3ProfileRequest) (*backupv1.DeleteS3ProfileResponse, error) { - if req == nil || strings.TrimSpace(req.GetProfileId()) == "" { - return nil, status.Error(codes.InvalidArgument, "profile_id is required") - } - - err := s.store.DeleteS3Profile(ctx, req.GetProfileId()) - if err != nil { - if mapped := mapS3ProfileStoreError(err); mapped != nil { - return nil, mapped - } - return nil, status.Errorf(codes.Internal, "delete s3 profile failed: %v", err) - } - return &backupv1.DeleteS3ProfileResponse{}, nil -} - -func (s *Server) SetActiveS3Profile(ctx context.Context, req *backupv1.SetActiveS3ProfileRequest) (*backupv1.SetActiveS3ProfileResponse, error) { - if req == nil || strings.TrimSpace(req.GetProfileId()) == "" { - return nil, status.Error(codes.InvalidArgument, "profile_id is required") - } - - profile, err := s.store.SetActiveS3Profile(ctx, req.GetProfileId()) - if err != nil { - if mapped := mapS3ProfileStoreError(err); mapped != nil { - return nil, mapped - } - return nil, status.Errorf(codes.Internal, "set active s3 profile failed: %v", err) - } - return &backupv1.SetActiveS3ProfileResponse{Profile: toProtoS3Profile(profile)}, nil -} - -func (s *Server) 
CreateBackupJob(ctx context.Context, req *backupv1.CreateBackupJobRequest) (*backupv1.CreateBackupJobResponse, error) { - if req == nil { - return nil, status.Error(codes.InvalidArgument, "request is required") - } - backupType := strings.TrimSpace(req.GetBackupType()) - if !isValidBackupType(backupType) { - return nil, status.Error(codes.InvalidArgument, "invalid backup_type") - } - - job, created, err := s.store.CreateBackupJob(ctx, entstore.CreateBackupJobInput{ - BackupType: backupType, - UploadToS3: req.GetUploadToS3(), - TriggeredBy: strings.TrimSpace(req.GetTriggeredBy()), - IdempotencyKey: strings.TrimSpace(req.GetIdempotencyKey()), - S3ProfileID: strings.TrimSpace(req.GetS3ProfileId()), - PostgresID: strings.TrimSpace(req.GetPostgresProfileId()), - RedisID: strings.TrimSpace(req.GetRedisProfileId()), - }) - if err != nil { - if ent.IsNotFound(err) { - return nil, status.Error(codes.InvalidArgument, "source profile or s3 profile not found") - } - if mapped := mapSourceProfileStoreError(err); mapped != nil { - return nil, mapped - } - return nil, status.Errorf(codes.Internal, "create backup job failed: %v", err) - } - if created && s.notifier != nil { - s.notifier.Notify() - } - return &backupv1.CreateBackupJobResponse{Job: toProtoJob(job)}, nil -} - -func (s *Server) ListBackupJobs(ctx context.Context, req *backupv1.ListBackupJobsRequest) (*backupv1.ListBackupJobsResponse, error) { - if req == nil { - req = &backupv1.ListBackupJobsRequest{} - } - statusFilter := strings.TrimSpace(req.GetStatus()) - if statusFilter != "" && !isValidBackupStatus(statusFilter) { - return nil, status.Error(codes.InvalidArgument, "invalid status filter") - } - backupType := strings.TrimSpace(req.GetBackupType()) - if backupType != "" && !isValidBackupType(backupType) { - return nil, status.Error(codes.InvalidArgument, "invalid backup_type filter") - } - - out, err := s.store.ListBackupJobs(ctx, entstore.ListBackupJobsInput{ - PageSize: req.GetPageSize(), - PageToken: 
req.GetPageToken(), - Status: statusFilter, - BackupType: backupType, - }) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "list backup jobs failed: %v", err) - } - - items := make([]*backupv1.BackupJob, 0, len(out.Items)) - for _, item := range out.Items { - items = append(items, toProtoJob(item)) - } - return &backupv1.ListBackupJobsResponse{Items: items, NextPageToken: out.NextPageToken}, nil -} - -func (s *Server) GetBackupJob(ctx context.Context, req *backupv1.GetBackupJobRequest) (*backupv1.GetBackupJobResponse, error) { - if req == nil || strings.TrimSpace(req.GetJobId()) == "" { - return nil, status.Error(codes.InvalidArgument, "job_id is required") - } - job, err := s.store.GetBackupJob(ctx, req.GetJobId()) - if err != nil { - if ent.IsNotFound(err) { - return nil, status.Error(codes.NotFound, "backup job not found") - } - return nil, status.Errorf(codes.Internal, "get backup job failed: %v", err) - } - return &backupv1.GetBackupJobResponse{Job: toProtoJob(job)}, nil -} - -func validateConfig(cfg *backupv1.BackupConfig) error { - sourceMode := strings.TrimSpace(cfg.GetSourceMode()) - if sourceMode != "direct" && sourceMode != "docker_exec" { - return errors.New("source_mode must be direct or docker_exec") - } - if strings.TrimSpace(cfg.GetBackupRoot()) == "" { - return errors.New("backup_root is required") - } - if cfg.GetRetentionDays() <= 0 { - return errors.New("retention_days must be > 0") - } - if cfg.GetKeepLast() <= 0 { - return errors.New("keep_last must be > 0") - } - if cfg.GetPostgres() == nil { - return errors.New("postgres config is required") - } - if cfg.GetRedis() == nil { - return errors.New("redis config is required") - } - if cfg.GetS3() == nil { - return errors.New("s3 config is required") - } - return nil -} - -func validateS3ProfileRequest(profileID, name string, s3Cfg *backupv1.S3Config) error { - if strings.TrimSpace(profileID) == "" { - return errors.New("profile_id is required") - } - if 
strings.TrimSpace(name) == "" { - return errors.New("name is required") - } - if s3Cfg == nil { - return errors.New("s3 config is required") - } - if s3Cfg.GetEnabled() { - if strings.TrimSpace(s3Cfg.GetBucket()) == "" { - return errors.New("s3.bucket is required") - } - if strings.TrimSpace(s3Cfg.GetRegion()) == "" { - return errors.New("s3.region is required") - } - } - return nil -} - -func validateSourceProfileRequest(sourceType, profileID, name string, cfg *backupv1.SourceConfig) error { - if strings.TrimSpace(sourceType) == "" { - return errors.New("source_type is required") - } - if strings.TrimSpace(sourceType) != "postgres" && strings.TrimSpace(sourceType) != "redis" { - return errors.New("source_type must be postgres or redis") - } - if strings.TrimSpace(profileID) == "" { - return errors.New("profile_id is required") - } - if strings.TrimSpace(name) == "" { - return errors.New("name is required") - } - if cfg == nil { - return errors.New("source config is required") - } - return nil -} - -func mapS3ProfileStoreError(err error) error { - switch { - case err == nil: - return nil - case ent.IsNotFound(err): - return status.Error(codes.NotFound, "s3 profile not found") - case ent.IsConstraintError(err): - return status.Error(codes.AlreadyExists, "s3 profile already exists") - case errors.Is(err, entstore.ErrS3ProfileRequired): - return status.Error(codes.InvalidArgument, err.Error()) - case errors.Is(err, entstore.ErrActiveS3Profile), errors.Is(err, entstore.ErrS3ProfileInUse): - return status.Error(codes.FailedPrecondition, err.Error()) - default: - return nil - } -} - -func mapSourceProfileStoreError(err error) error { - switch { - case err == nil: - return nil - case ent.IsNotFound(err): - return status.Error(codes.NotFound, "source profile not found") - case ent.IsConstraintError(err): - return status.Error(codes.AlreadyExists, "source profile already exists") - case errors.Is(err, entstore.ErrSourceTypeInvalid), errors.Is(err, 
entstore.ErrSourceIDRequired): - return status.Error(codes.InvalidArgument, err.Error()) - case errors.Is(err, entstore.ErrSourceActive), errors.Is(err, entstore.ErrSourceInUse): - return status.Error(codes.FailedPrecondition, err.Error()) - default: - return nil - } -} - -func isValidBackupType(v string) bool { - switch v { - case backupjob.BackupTypePostgres.String(), backupjob.BackupTypeRedis.String(), backupjob.BackupTypeFull.String(): - return true - default: - return false - } -} - -func isValidBackupStatus(v string) bool { - switch v { - case backupjob.StatusQueued.String(), - backupjob.StatusRunning.String(), - backupjob.StatusSucceeded.String(), - backupjob.StatusFailed.String(), - backupjob.StatusPartialSucceeded.String(): - return true - default: - return false - } -} - -func fromProtoConfig(cfg *backupv1.BackupConfig) entstore.ConfigSnapshot { - s3Cfg := cfg.GetS3() - return entstore.ConfigSnapshot{ - SourceMode: strings.TrimSpace(cfg.GetSourceMode()), - BackupRoot: strings.TrimSpace(cfg.GetBackupRoot()), - SQLitePath: strings.TrimSpace(cfg.GetSqlitePath()), - RetentionDays: cfg.GetRetentionDays(), - KeepLast: cfg.GetKeepLast(), - Postgres: fromProtoSourceConfig(cfg.GetPostgres()), - Redis: fromProtoSourceConfig(cfg.GetRedis()), - S3: fromProtoS3Config(s3Cfg), - ActivePostgresID: strings.TrimSpace(cfg.GetActivePostgresProfileId()), - ActiveRedisID: strings.TrimSpace(cfg.GetActiveRedisProfileId()), - ActiveS3ProfileID: strings.TrimSpace(cfg.GetActiveS3ProfileId()), - } -} - -func fromProtoSourceConfig(sourceCfg *backupv1.SourceConfig) entstore.SourceConfig { - if sourceCfg == nil { - return entstore.SourceConfig{} - } - return entstore.SourceConfig{ - Host: strings.TrimSpace(sourceCfg.GetHost()), - Port: sourceCfg.GetPort(), - User: strings.TrimSpace(sourceCfg.GetUser()), - Username: strings.TrimSpace(sourceCfg.GetUsername()), - Password: strings.TrimSpace(sourceCfg.GetPassword()), - Database: strings.TrimSpace(sourceCfg.GetDatabase()), - SSLMode: 
strings.TrimSpace(sourceCfg.GetSslMode()), - Addr: strings.TrimSpace(sourceCfg.GetAddr()), - DB: sourceCfg.GetDb(), - ContainerName: strings.TrimSpace(sourceCfg.GetContainerName()), - } -} - -func fromProtoS3Config(s3Cfg *backupv1.S3Config) entstore.S3Config { - if s3Cfg == nil { - return entstore.S3Config{} - } - return entstore.S3Config{ - Enabled: s3Cfg.GetEnabled(), - Endpoint: strings.TrimSpace(s3Cfg.GetEndpoint()), - Region: strings.TrimSpace(s3Cfg.GetRegion()), - Bucket: strings.TrimSpace(s3Cfg.GetBucket()), - AccessKeyID: strings.TrimSpace(s3Cfg.GetAccessKeyId()), - SecretAccessKey: strings.TrimSpace(s3Cfg.GetSecretAccessKey()), - Prefix: strings.Trim(strings.TrimSpace(s3Cfg.GetPrefix()), "/"), - ForcePathStyle: s3Cfg.GetForcePathStyle(), - UseSSL: s3Cfg.GetUseSsl(), - } -} - -func toProtoConfig(cfg *entstore.ConfigSnapshot) *backupv1.BackupConfig { - if cfg == nil { - return &backupv1.BackupConfig{} - } - return &backupv1.BackupConfig{ - SourceMode: cfg.SourceMode, - BackupRoot: cfg.BackupRoot, - SqlitePath: cfg.SQLitePath, - RetentionDays: cfg.RetentionDays, - KeepLast: cfg.KeepLast, - Postgres: toProtoSourceConfig(cfg.Postgres), - Redis: toProtoSourceConfig(cfg.Redis), - S3: &backupv1.S3Config{ - Enabled: cfg.S3.Enabled, - Endpoint: cfg.S3.Endpoint, - Region: cfg.S3.Region, - Bucket: cfg.S3.Bucket, - AccessKeyId: cfg.S3.AccessKeyID, - SecretAccessKey: cfg.S3.SecretAccessKey, - Prefix: cfg.S3.Prefix, - ForcePathStyle: cfg.S3.ForcePathStyle, - UseSsl: cfg.S3.UseSSL, - }, - ActivePostgresProfileId: cfg.ActivePostgresID, - ActiveRedisProfileId: cfg.ActiveRedisID, - ActiveS3ProfileId: cfg.ActiveS3ProfileID, - } -} - -func toProtoSourceConfig(cfg entstore.SourceConfig) *backupv1.SourceConfig { - return &backupv1.SourceConfig{ - Host: cfg.Host, - Port: cfg.Port, - User: cfg.User, - Password: cfg.Password, - Database: cfg.Database, - SslMode: cfg.SSLMode, - Addr: cfg.Addr, - Username: cfg.Username, - Db: cfg.DB, - ContainerName: cfg.ContainerName, - } -} - -func 
toProtoS3Profile(profile *entstore.S3ProfileSnapshot) *backupv1.S3Profile { - if profile == nil { - return &backupv1.S3Profile{} - } - out := &backupv1.S3Profile{ - ProfileId: profile.ProfileID, - Name: profile.Name, - IsActive: profile.IsActive, - SecretAccessKeyConfigured: profile.SecretAccessKeyConfigured, - S3: &backupv1.S3Config{ - Enabled: profile.S3.Enabled, - Endpoint: profile.S3.Endpoint, - Region: profile.S3.Region, - Bucket: profile.S3.Bucket, - AccessKeyId: profile.S3.AccessKeyID, - Prefix: profile.S3.Prefix, - ForcePathStyle: profile.S3.ForcePathStyle, - UseSsl: profile.S3.UseSSL, - }, - } - if !profile.CreatedAt.IsZero() { - out.CreatedAt = profile.CreatedAt.UTC().Format(time.RFC3339) - } - if !profile.UpdatedAt.IsZero() { - out.UpdatedAt = profile.UpdatedAt.UTC().Format(time.RFC3339) - } - return out -} - -func toProtoSourceProfile(profile *entstore.SourceProfileSnapshot) *backupv1.SourceProfile { - if profile == nil { - return &backupv1.SourceProfile{} - } - out := &backupv1.SourceProfile{ - SourceType: profile.SourceType, - ProfileId: profile.ProfileID, - Name: profile.Name, - IsActive: profile.IsActive, - Config: toProtoSourceConfig(profile.Config), - PasswordConfigured: profile.PasswordConfigured, - } - if out.GetConfig() != nil { - out.Config.Password = "" - } - if !profile.CreatedAt.IsZero() { - out.CreatedAt = profile.CreatedAt.UTC().Format(time.RFC3339) - } - if !profile.UpdatedAt.IsZero() { - out.UpdatedAt = profile.UpdatedAt.UTC().Format(time.RFC3339) - } - return out -} - -func toProtoJob(job *ent.BackupJob) *backupv1.BackupJob { - if job == nil { - return &backupv1.BackupJob{} - } - out := &backupv1.BackupJob{ - JobId: job.JobID, - BackupType: job.BackupType.String(), - Status: job.Status.String(), - TriggeredBy: job.TriggeredBy, - IdempotencyKey: job.IdempotencyKey, - UploadToS3: job.UploadToS3, - S3ProfileId: job.S3ProfileID, - PostgresProfileId: job.PostgresProfileID, - RedisProfileId: job.RedisProfileID, - ErrorMessage: 
job.ErrorMessage, - Artifact: &backupv1.BackupArtifact{ - LocalPath: job.ArtifactLocalPath, - SizeBytes: nillableInt64(job.ArtifactSizeBytes), - Sha256: job.ArtifactSha256, - }, - S3Object: &backupv1.BackupS3Object{ - Bucket: job.S3Bucket, - Key: job.S3Key, - Etag: job.S3Etag, - }, - } - if job.StartedAt != nil { - out.StartedAt = job.StartedAt.UTC().Format(time.RFC3339) - } - if job.FinishedAt != nil { - out.FinishedAt = job.FinishedAt.UTC().Format(time.RFC3339) - } - return out -} - -func nillableInt64(v *int64) int64 { - if v == nil { - return 0 - } - return *v -} diff --git a/backup/internal/s3client/client.go b/backup/internal/s3client/client.go deleted file mode 100644 index 7a54475ac..000000000 --- a/backup/internal/s3client/client.go +++ /dev/null @@ -1,142 +0,0 @@ -package s3client - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/aws/aws-sdk-go-v2/aws" - awsconfig "github.com/aws/aws-sdk-go-v2/config" - "github.com/aws/aws-sdk-go-v2/credentials" - "github.com/aws/aws-sdk-go-v2/feature/s3/manager" - "github.com/aws/aws-sdk-go-v2/service/s3" -) - -type Config struct { - Endpoint string - Region string - AccessKeyID string - SecretAccessKey string - ForcePathStyle bool - UseSSL bool - Bucket string - Prefix string -} - -type Client struct { - raw *s3.Client - bucket string - prefix string -} - -func New(ctx context.Context, cfg Config) (*Client, error) { - region := strings.TrimSpace(cfg.Region) - if region == "" { - region = "us-east-1" - } - - loadOptions := []func(*awsconfig.LoadOptions) error{ - awsconfig.WithRegion(region), - } - if strings.TrimSpace(cfg.AccessKeyID) != "" || strings.TrimSpace(cfg.SecretAccessKey) != "" { - loadOptions = append(loadOptions, awsconfig.WithCredentialsProvider( - credentials.NewStaticCredentialsProvider( - strings.TrimSpace(cfg.AccessKeyID), - strings.TrimSpace(cfg.SecretAccessKey), - "", - ), - )) - } - - awsCfg, err := awsconfig.LoadDefaultConfig(ctx, loadOptions...) 
- if err != nil { - return nil, err - } - - client := s3.NewFromConfig(awsCfg, func(options *s3.Options) { - options.UsePathStyle = cfg.ForcePathStyle - if endpoint := normalizeEndpoint(strings.TrimSpace(cfg.Endpoint), cfg.UseSSL); endpoint != "" { - options.EndpointResolver = s3.EndpointResolverFromURL(endpoint) - } - }) - - return &Client{ - raw: client, - bucket: strings.TrimSpace(cfg.Bucket), - prefix: strings.Trim(strings.TrimSpace(cfg.Prefix), "/"), - }, nil -} - -func (c *Client) Raw() *s3.Client { - if c == nil { - return nil - } - return c.raw -} - -func (c *Client) Bucket() string { - if c == nil { - return "" - } - return c.bucket -} - -func (c *Client) Prefix() string { - if c == nil { - return "" - } - return c.prefix -} - -func (c *Client) UploadFile(ctx context.Context, localPath, key string) (string, error) { - if c == nil || c.raw == nil { - return "", fmt.Errorf("s3 client is not initialized") - } - if strings.TrimSpace(c.bucket) == "" { - return "", fmt.Errorf("s3 bucket is required") - } - - path := strings.TrimSpace(localPath) - if path == "" { - return "", fmt.Errorf("local file path is required") - } - objectKey := strings.Trim(strings.TrimSpace(key), "/") - if objectKey == "" { - objectKey = filepath.Base(path) - } - - reader, err := os.Open(path) - if err != nil { - return "", err - } - defer func() { - _ = reader.Close() - }() - - uploader := manager.NewUploader(c.raw) - out, err := uploader.Upload(ctx, &s3.PutObjectInput{ - Bucket: aws.String(c.bucket), - Key: aws.String(objectKey), - Body: reader, - }) - if err != nil { - return "", err - } - return strings.TrimSpace(aws.ToString(out.ETag)), nil -} - -func normalizeEndpoint(endpoint string, useSSL bool) string { - trimmed := strings.TrimSpace(endpoint) - if trimmed == "" { - return "" - } - if strings.HasPrefix(trimmed, "http://") || strings.HasPrefix(trimmed, "https://") { - return trimmed - } - if useSSL { - return "https://" + trimmed - } - return "http://" + trimmed -} diff --git 
a/backup/internal/s3client/doc.go b/backup/internal/s3client/doc.go deleted file mode 100644 index b5a3ff448..000000000 --- a/backup/internal/s3client/doc.go +++ /dev/null @@ -1 +0,0 @@ -package s3client diff --git a/backup/internal/store/entstore/doc.go b/backup/internal/store/entstore/doc.go deleted file mode 100644 index b6fa08a51..000000000 --- a/backup/internal/store/entstore/doc.go +++ /dev/null @@ -1 +0,0 @@ -package entstore diff --git a/backup/internal/store/entstore/store.go b/backup/internal/store/entstore/store.go deleted file mode 100644 index 54f8c40fb..000000000 --- a/backup/internal/store/entstore/store.go +++ /dev/null @@ -1,1881 +0,0 @@ -package entstore - -import ( - "context" - "database/sql" - "encoding/base64" - "errors" - "fmt" - "strconv" - "strings" - "time" - - "github.com/Wei-Shaw/sub2api/backup/ent" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/Wei-Shaw/sub2api/backup/ent/backupjobevent" - "github.com/Wei-Shaw/sub2api/backup/ent/backups3config" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsetting" - "github.com/Wei-Shaw/sub2api/backup/ent/backupsourceconfig" - - "entgo.io/ent/dialect" - entsql "entgo.io/ent/dialect/sql" - _ "modernc.org/sqlite" -) - -const ( - defaultSQLitePath = "/tmp/sub2api-backupd.db" - idempotencyWindow = 10 * time.Minute - defaultS3ProfileID = "default" - defaultSourceID = "default" -) - -var ( - ErrS3ProfileInUse = errors.New("s3 profile has queued/running jobs") - ErrActiveS3Profile = errors.New("active s3 profile cannot be deleted") - ErrS3ProfileRequired = errors.New("s3 profile_id is required") - ErrSourceTypeInvalid = errors.New("source_type must be postgres or redis") - ErrSourceIDRequired = errors.New("source profile_id is required") - ErrSourceActive = errors.New("active source profile cannot be deleted") - ErrSourceInUse = errors.New("source profile has queued/running jobs") -) - -type SourceConfig struct { - Host string - Port int32 - User string - Password string - Database 
string - SSLMode string - Addr string - Username string - DB int32 - ContainerName string -} - -type S3Config struct { - Enabled bool - Endpoint string - Region string - Bucket string - AccessKeyID string - SecretAccessKey string - Prefix string - ForcePathStyle bool - UseSSL bool -} - -type ConfigSnapshot struct { - SourceMode string - BackupRoot string - SQLitePath string - RetentionDays int32 - KeepLast int32 - Postgres SourceConfig - Redis SourceConfig - S3 S3Config - ActivePostgresID string - ActiveRedisID string - ActiveS3ProfileID string -} - -type SourceProfileSnapshot struct { - SourceType string - ProfileID string - Name string - IsActive bool - Config SourceConfig - PasswordConfigured bool - CreatedAt time.Time - UpdatedAt time.Time -} - -type CreateSourceProfileInput struct { - SourceType string - ProfileID string - Name string - Config SourceConfig - SetActive bool -} - -type UpdateSourceProfileInput struct { - SourceType string - ProfileID string - Name string - Config SourceConfig -} - -type S3ProfileSnapshot struct { - ProfileID string - Name string - IsActive bool - S3 S3Config - SecretAccessKeyConfigured bool - CreatedAt time.Time - UpdatedAt time.Time -} - -type CreateS3ProfileInput struct { - ProfileID string - Name string - S3 S3Config - SetActive bool -} - -type UpdateS3ProfileInput struct { - ProfileID string - Name string - S3 S3Config -} - -type CreateBackupJobInput struct { - BackupType string - UploadToS3 bool - TriggeredBy string - IdempotencyKey string - S3ProfileID string - PostgresID string - RedisID string -} - -type ListBackupJobsInput struct { - PageSize int32 - PageToken string - Status string - BackupType string -} - -type ListBackupJobsOutput struct { - Items []*ent.BackupJob - NextPageToken string -} - -type BackupArtifactSnapshot struct { - LocalPath string - SizeBytes int64 - SHA256 string -} - -type BackupS3ObjectSnapshot struct { - Bucket string - Key string - ETag string -} - -type FinishBackupJobInput struct { - JobID 
string - Status string - ErrorMessage string - Artifact *BackupArtifactSnapshot - S3Object *BackupS3ObjectSnapshot -} - -type Store struct { - client *ent.Client - sqlitePath string -} - -func Open(ctx context.Context, sqlitePath string) (*Store, error) { - path := normalizeSQLitePath(sqlitePath) - dsn := sqliteDSN(path) - - db, err := sql.Open("sqlite", dsn) - if err != nil { - return nil, err - } - - if _, err := db.ExecContext(ctx, "PRAGMA journal_mode=WAL;"); err != nil { - _ = db.Close() - return nil, err - } - if _, err := db.ExecContext(ctx, "PRAGMA busy_timeout=5000;"); err != nil { - _ = db.Close() - return nil, err - } - - drv := entsql.OpenDB(dialect.SQLite, db) - client := ent.NewClient(ent.Driver(drv)) - if err := client.Schema.Create(ctx); err != nil { - _ = client.Close() - return nil, err - } - - store := &Store{client: client, sqlitePath: path} - if err := store.ensureDefaults(ctx); err != nil { - _ = client.Close() - return nil, err - } - return store, nil -} - -func (s *Store) Close() error { - if s == nil || s.client == nil { - return nil - } - return s.client.Close() -} - -func (s *Store) GetConfig(ctx context.Context) (*ConfigSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - setting, err := s.client.BackupSetting.Query().Order(ent.Asc(backupsetting.FieldID)).First(ctx) - if err != nil { - return nil, err - } - postgresCfg, err := s.getActiveSourceConfigEntity(ctx, backupsourceconfig.SourceTypePostgres.String()) - if err != nil { - return nil, err - } - redisCfg, err := s.getActiveSourceConfigEntity(ctx, backupsourceconfig.SourceTypeRedis.String()) - if err != nil { - return nil, err - } - s3Cfg, err := s.getActiveS3ConfigEntity(ctx) - if err != nil { - return nil, err - } - - cfg := &ConfigSnapshot{ - SourceMode: setting.SourceMode.String(), - BackupRoot: setting.BackupRoot, - SQLitePath: setting.SqlitePath, - RetentionDays: int32(setting.RetentionDays), - KeepLast: int32(setting.KeepLast), - 
Postgres: SourceConfig{ - Host: postgresCfg.Host, - Port: int32(nillableInt(postgresCfg.Port)), - User: postgresCfg.Username, - Password: postgresCfg.PasswordEncrypted, - Database: postgresCfg.Database, - SSLMode: postgresCfg.SslMode, - ContainerName: postgresCfg.ContainerName, - }, - Redis: SourceConfig{ - Addr: redisCfg.Addr, - Username: redisCfg.Username, - Password: redisCfg.PasswordEncrypted, - DB: int32(nillableInt(redisCfg.RedisDb)), - ContainerName: redisCfg.ContainerName, - }, - S3: S3Config{ - Enabled: s3Cfg.Enabled, - Endpoint: s3Cfg.Endpoint, - Region: s3Cfg.Region, - Bucket: s3Cfg.Bucket, - AccessKeyID: s3Cfg.AccessKeyID, - SecretAccessKey: s3Cfg.SecretAccessKeyEncrypted, - Prefix: s3Cfg.Prefix, - ForcePathStyle: s3Cfg.ForcePathStyle, - UseSSL: s3Cfg.UseSsl, - }, - ActivePostgresID: postgresCfg.ProfileID, - ActiveRedisID: redisCfg.ProfileID, - ActiveS3ProfileID: s3Cfg.ProfileID, - } - return cfg, nil -} - -func (s *Store) UpdateConfig(ctx context.Context, cfg ConfigSnapshot) (*ConfigSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - tx, err := s.client.Tx(ctx) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - _ = tx.Rollback() - } - }() - - setting, err := tx.BackupSetting.Query().Order(ent.Asc(backupsetting.FieldID)).First(ctx) - if err != nil { - return nil, err - } - updatedSetting := tx.BackupSetting.UpdateOneID(setting.ID). - SetSourceMode(backupsetting.SourceMode(cfg.SourceMode)). - SetBackupRoot(strings.TrimSpace(cfg.BackupRoot)). - SetRetentionDays(int(cfg.RetentionDays)). - SetKeepLast(int(cfg.KeepLast)). 
- SetSqlitePath(strings.TrimSpace(cfg.SQLitePath)) - if _, err = updatedSetting.Save(ctx); err != nil { - return nil, err - } - - if err = s.updateSourceConfigTx( - ctx, - tx, - backupsourceconfig.SourceTypePostgres.String(), - strings.TrimSpace(cfg.ActivePostgresID), - cfg.Postgres, - ); err != nil { - return nil, err - } - if err = s.updateSourceConfigTx( - ctx, - tx, - backupsourceconfig.SourceTypeRedis.String(), - strings.TrimSpace(cfg.ActiveRedisID), - cfg.Redis, - ); err != nil { - return nil, err - } - - s3Entity, err := tx.BackupS3Config.Query(). - Where(backups3config.IsActiveEQ(true)). - Order(ent.Asc(backups3config.FieldID)). - First(ctx) - if err != nil { - return nil, err - } - s3Updater := tx.BackupS3Config.UpdateOneID(s3Entity.ID). - SetEnabled(cfg.S3.Enabled). - SetEndpoint(strings.TrimSpace(cfg.S3.Endpoint)). - SetRegion(strings.TrimSpace(cfg.S3.Region)). - SetBucket(strings.TrimSpace(cfg.S3.Bucket)). - SetAccessKeyID(strings.TrimSpace(cfg.S3.AccessKeyID)). - SetPrefix(strings.Trim(strings.TrimSpace(cfg.S3.Prefix), "/")). - SetForcePathStyle(cfg.S3.ForcePathStyle). - SetUseSsl(cfg.S3.UseSSL) - if strings.TrimSpace(cfg.S3.SecretAccessKey) != "" { - s3Updater.SetSecretAccessKeyEncrypted(strings.TrimSpace(cfg.S3.SecretAccessKey)) - } - if _, err = s3Updater.Save(ctx); err != nil { - return nil, err - } - - if err = tx.Commit(); err != nil { - return nil, err - } - return s.GetConfig(ctx) -} - -func (s *Store) ListS3Profiles(ctx context.Context) ([]*S3ProfileSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - items, err := s.client.BackupS3Config.Query(). - Order(ent.Desc(backups3config.FieldIsActive), ent.Asc(backups3config.FieldID)). 
- All(ctx) - if err != nil { - return nil, err - } - - out := make([]*S3ProfileSnapshot, 0, len(items)) - for _, item := range items { - out = append(out, toS3ProfileSnapshot(item)) - } - return out, nil -} - -func (s *Store) GetS3Profile(ctx context.Context, profileID string) (*S3ProfileSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - profileID = strings.TrimSpace(profileID) - if profileID == "" { - return nil, ErrS3ProfileRequired - } - entity, err := s.client.BackupS3Config.Query(). - Where(backups3config.ProfileIDEQ(profileID)). - First(ctx) - if err != nil { - return nil, err - } - return toS3ProfileSnapshot(entity), nil -} - -func (s *Store) CreateS3Profile(ctx context.Context, input CreateS3ProfileInput) (*S3ProfileSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - profileID := strings.TrimSpace(input.ProfileID) - if profileID == "" { - return nil, ErrS3ProfileRequired - } - name := strings.TrimSpace(input.Name) - if name == "" { - return nil, errors.New("s3 profile name is required") - } - - tx, err := s.client.Tx(ctx) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - _ = tx.Rollback() - } - }() - - activeCount, err := tx.BackupS3Config.Query().Where(backups3config.IsActiveEQ(true)).Count(ctx) - if err != nil { - return nil, err - } - setActive := input.SetActive || activeCount == 0 - if setActive { - if _, err = tx.BackupS3Config.Update(). - Where(backups3config.IsActiveEQ(true)). - SetIsActive(false). - Save(ctx); err != nil { - return nil, err - } - } - - builder := tx.BackupS3Config.Create(). - SetProfileID(profileID). - SetName(name). - SetIsActive(setActive). - SetEnabled(input.S3.Enabled). - SetEndpoint(strings.TrimSpace(input.S3.Endpoint)). - SetRegion(strings.TrimSpace(input.S3.Region)). - SetBucket(strings.TrimSpace(input.S3.Bucket)). - SetAccessKeyID(strings.TrimSpace(input.S3.AccessKeyID)). 
- SetPrefix(strings.Trim(strings.TrimSpace(input.S3.Prefix), "/")). - SetForcePathStyle(input.S3.ForcePathStyle). - SetUseSsl(input.S3.UseSSL) - if secret := strings.TrimSpace(input.S3.SecretAccessKey); secret != "" { - builder.SetSecretAccessKeyEncrypted(secret) - } - - if _, err = builder.Save(ctx); err != nil { - return nil, err - } - if err = tx.Commit(); err != nil { - return nil, err - } - return s.GetS3Profile(ctx, profileID) -} - -func (s *Store) UpdateS3Profile(ctx context.Context, input UpdateS3ProfileInput) (*S3ProfileSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - profileID := strings.TrimSpace(input.ProfileID) - if profileID == "" { - return nil, ErrS3ProfileRequired - } - name := strings.TrimSpace(input.Name) - if name == "" { - return nil, errors.New("s3 profile name is required") - } - - tx, err := s.client.Tx(ctx) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - _ = tx.Rollback() - } - }() - - entity, err := tx.BackupS3Config.Query(). - Where(backups3config.ProfileIDEQ(profileID)). - First(ctx) - if err != nil { - return nil, err - } - - updater := tx.BackupS3Config.UpdateOneID(entity.ID). - SetName(name). - SetEnabled(input.S3.Enabled). - SetEndpoint(strings.TrimSpace(input.S3.Endpoint)). - SetRegion(strings.TrimSpace(input.S3.Region)). - SetBucket(strings.TrimSpace(input.S3.Bucket)). - SetAccessKeyID(strings.TrimSpace(input.S3.AccessKeyID)). - SetPrefix(strings.Trim(strings.TrimSpace(input.S3.Prefix), "/")). - SetForcePathStyle(input.S3.ForcePathStyle). 
- SetUseSsl(input.S3.UseSSL) - if secret := strings.TrimSpace(input.S3.SecretAccessKey); secret != "" { - updater.SetSecretAccessKeyEncrypted(secret) - } - if _, err = updater.Save(ctx); err != nil { - return nil, err - } - if err = tx.Commit(); err != nil { - return nil, err - } - return s.GetS3Profile(ctx, profileID) -} - -func (s *Store) DeleteS3Profile(ctx context.Context, profileID string) error { - if err := s.ensureDefaults(ctx); err != nil { - return err - } - - profileID = strings.TrimSpace(profileID) - if profileID == "" { - return ErrS3ProfileRequired - } - - tx, err := s.client.Tx(ctx) - if err != nil { - return err - } - defer func() { - if err != nil { - _ = tx.Rollback() - } - }() - - entity, err := tx.BackupS3Config.Query(). - Where(backups3config.ProfileIDEQ(profileID)). - First(ctx) - if err != nil { - return err - } - if entity.IsActive { - _ = tx.Rollback() - return ErrActiveS3Profile - } - - pendingCount, err := tx.BackupJob.Query(). - Where( - backupjob.S3ProfileIDEQ(profileID), - backupjob.UploadToS3EQ(true), - backupjob.Or( - backupjob.StatusEQ(backupjob.StatusQueued), - backupjob.StatusEQ(backupjob.StatusRunning), - ), - ). - Count(ctx) - if err != nil { - return err - } - if pendingCount > 0 { - _ = tx.Rollback() - return ErrS3ProfileInUse - } - - if err = tx.BackupS3Config.DeleteOneID(entity.ID).Exec(ctx); err != nil { - return err - } - return tx.Commit() -} - -func (s *Store) SetActiveS3Profile(ctx context.Context, profileID string) (*S3ProfileSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - profileID = strings.TrimSpace(profileID) - if profileID == "" { - return nil, ErrS3ProfileRequired - } - - tx, err := s.client.Tx(ctx) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - _ = tx.Rollback() - } - }() - - entity, err := tx.BackupS3Config.Query(). - Where(backups3config.ProfileIDEQ(profileID)). 
- First(ctx) - if err != nil { - return nil, err - } - - if !entity.IsActive { - if _, err = tx.BackupS3Config.Update(). - Where(backups3config.IsActiveEQ(true)). - SetIsActive(false). - Save(ctx); err != nil { - return nil, err - } - if _, err = tx.BackupS3Config.UpdateOneID(entity.ID). - SetIsActive(true). - Save(ctx); err != nil { - return nil, err - } - } - - if err = tx.Commit(); err != nil { - return nil, err - } - return s.GetS3Profile(ctx, profileID) -} - -func (s *Store) ListSourceProfiles(ctx context.Context, sourceType string) ([]*SourceProfileSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - enumType, err := parseSourceType(sourceType) - if err != nil { - return nil, err - } - items, err := s.client.BackupSourceConfig.Query(). - Where(backupsourceconfig.SourceTypeEQ(enumType)). - Order(ent.Desc(backupsourceconfig.FieldIsActive), ent.Asc(backupsourceconfig.FieldID)). - All(ctx) - if err != nil { - return nil, err - } - - out := make([]*SourceProfileSnapshot, 0, len(items)) - for _, item := range items { - out = append(out, toSourceProfileSnapshot(item)) - } - return out, nil -} - -func (s *Store) GetSourceProfile(ctx context.Context, sourceType, profileID string) (*SourceProfileSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - enumType, err := parseSourceType(sourceType) - if err != nil { - return nil, err - } - normalizedProfileID := strings.TrimSpace(profileID) - if normalizedProfileID == "" { - return nil, ErrSourceIDRequired - } - - entity, err := s.client.BackupSourceConfig.Query(). - Where( - backupsourceconfig.SourceTypeEQ(enumType), - backupsourceconfig.ProfileIDEQ(normalizedProfileID), - ). 
- First(ctx) - if err != nil { - return nil, err - } - return toSourceProfileSnapshot(entity), nil -} - -func (s *Store) CreateSourceProfile(ctx context.Context, input CreateSourceProfileInput) (*SourceProfileSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - enumType, err := parseSourceType(input.SourceType) - if err != nil { - return nil, err - } - profileID := strings.TrimSpace(input.ProfileID) - if profileID == "" { - return nil, ErrSourceIDRequired - } - name := strings.TrimSpace(input.Name) - if name == "" { - return nil, errors.New("source profile name is required") - } - - tx, err := s.client.Tx(ctx) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - _ = tx.Rollback() - } - }() - - activeCount, err := tx.BackupSourceConfig.Query(). - Where( - backupsourceconfig.SourceTypeEQ(enumType), - backupsourceconfig.IsActiveEQ(true), - ). - Count(ctx) - if err != nil { - return nil, err - } - setActive := input.SetActive || activeCount == 0 - if setActive { - if _, err = tx.BackupSourceConfig.Update(). - Where(backupsourceconfig.SourceTypeEQ(enumType), backupsourceconfig.IsActiveEQ(true)). - SetIsActive(false). - Save(ctx); err != nil { - return nil, err - } - } - - create := tx.BackupSourceConfig.Create(). - SetSourceType(enumType). - SetProfileID(profileID). - SetName(name). 
- SetIsActive(setActive) - applySourceConfigCreate(create, enumType, input.Config) - if _, err = create.Save(ctx); err != nil { - return nil, err - } - if err = tx.Commit(); err != nil { - return nil, err - } - return s.GetSourceProfile(ctx, enumType.String(), profileID) -} - -func (s *Store) UpdateSourceProfile(ctx context.Context, input UpdateSourceProfileInput) (*SourceProfileSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - enumType, err := parseSourceType(input.SourceType) - if err != nil { - return nil, err - } - profileID := strings.TrimSpace(input.ProfileID) - if profileID == "" { - return nil, ErrSourceIDRequired - } - name := strings.TrimSpace(input.Name) - if name == "" { - return nil, errors.New("source profile name is required") - } - - entity, err := s.client.BackupSourceConfig.Query(). - Where( - backupsourceconfig.SourceTypeEQ(enumType), - backupsourceconfig.ProfileIDEQ(profileID), - ). - First(ctx) - if err != nil { - return nil, err - } - - updater := s.client.BackupSourceConfig.UpdateOneID(entity.ID). - SetName(name) - applySourceConfigUpdate(updater, enumType, input.Config) - if _, err = updater.Save(ctx); err != nil { - return nil, err - } - return s.GetSourceProfile(ctx, enumType.String(), profileID) -} - -func (s *Store) DeleteSourceProfile(ctx context.Context, sourceType, profileID string) error { - if err := s.ensureDefaults(ctx); err != nil { - return err - } - - enumType, err := parseSourceType(sourceType) - if err != nil { - return err - } - normalizedProfileID := strings.TrimSpace(profileID) - if normalizedProfileID == "" { - return ErrSourceIDRequired - } - - tx, err := s.client.Tx(ctx) - if err != nil { - return err - } - defer func() { - if err != nil { - _ = tx.Rollback() - } - }() - - entity, err := tx.BackupSourceConfig.Query(). - Where( - backupsourceconfig.SourceTypeEQ(enumType), - backupsourceconfig.ProfileIDEQ(normalizedProfileID), - ). 
- First(ctx) - if err != nil { - return err - } - if entity.IsActive { - _ = tx.Rollback() - return ErrSourceActive - } - - inUseCount := 0 - switch enumType { - case backupsourceconfig.SourceTypePostgres: - inUseCount, err = tx.BackupJob.Query(). - Where( - backupjob.PostgresProfileIDEQ(normalizedProfileID), - backupjob.Or( - backupjob.StatusEQ(backupjob.StatusQueued), - backupjob.StatusEQ(backupjob.StatusRunning), - ), - ). - Count(ctx) - case backupsourceconfig.SourceTypeRedis: - inUseCount, err = tx.BackupJob.Query(). - Where( - backupjob.RedisProfileIDEQ(normalizedProfileID), - backupjob.Or( - backupjob.StatusEQ(backupjob.StatusQueued), - backupjob.StatusEQ(backupjob.StatusRunning), - ), - ). - Count(ctx) - } - if err != nil { - return err - } - if inUseCount > 0 { - _ = tx.Rollback() - return ErrSourceInUse - } - - if err = tx.BackupSourceConfig.DeleteOneID(entity.ID).Exec(ctx); err != nil { - return err - } - return tx.Commit() -} - -func (s *Store) SetActiveSourceProfile(ctx context.Context, sourceType, profileID string) (*SourceProfileSnapshot, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, err - } - - enumType, err := parseSourceType(sourceType) - if err != nil { - return nil, err - } - normalizedProfileID := strings.TrimSpace(profileID) - if normalizedProfileID == "" { - return nil, ErrSourceIDRequired - } - - tx, err := s.client.Tx(ctx) - if err != nil { - return nil, err - } - defer func() { - if err != nil { - _ = tx.Rollback() - } - }() - - entity, err := tx.BackupSourceConfig.Query(). - Where( - backupsourceconfig.SourceTypeEQ(enumType), - backupsourceconfig.ProfileIDEQ(normalizedProfileID), - ). - First(ctx) - if err != nil { - return nil, err - } - - if !entity.IsActive { - if _, err = tx.BackupSourceConfig.Update(). - Where(backupsourceconfig.SourceTypeEQ(enumType), backupsourceconfig.IsActiveEQ(true)). - SetIsActive(false). 
- Save(ctx); err != nil { - return nil, err - } - if _, err = tx.BackupSourceConfig.UpdateOneID(entity.ID). - SetIsActive(true). - Save(ctx); err != nil { - return nil, err - } - } - - if err = tx.Commit(); err != nil { - return nil, err - } - return s.GetSourceProfile(ctx, enumType.String(), normalizedProfileID) -} - -func (s *Store) CreateBackupJob(ctx context.Context, input CreateBackupJobInput) (*ent.BackupJob, bool, error) { - if err := s.ensureDefaults(ctx); err != nil { - return nil, false, err - } - if strings.TrimSpace(input.TriggeredBy) == "" { - input.TriggeredBy = "admin:unknown" - } - input.BackupType = strings.TrimSpace(input.BackupType) - input.S3ProfileID = strings.TrimSpace(input.S3ProfileID) - input.PostgresID = strings.TrimSpace(input.PostgresID) - input.RedisID = strings.TrimSpace(input.RedisID) - needsPostgres := backupTypeNeedsPostgres(input.BackupType) - needsRedis := backupTypeNeedsRedis(input.BackupType) - - // 仅保留本次备份类型真正需要的来源配置,避免写入无关 profile 造成“被占用”误判。 - if !needsPostgres { - input.PostgresID = "" - } - if !needsRedis { - input.RedisID = "" - } - if !input.UploadToS3 { - input.S3ProfileID = "" - } - - if needsPostgres { - resolvedID, resolveErr := s.resolveSourceProfileID(ctx, backupsourceconfig.SourceTypePostgres.String(), input.PostgresID) - if resolveErr != nil { - return nil, false, resolveErr - } - input.PostgresID = resolvedID - } - if needsRedis { - resolvedID, resolveErr := s.resolveSourceProfileID(ctx, backupsourceconfig.SourceTypeRedis.String(), input.RedisID) - if resolveErr != nil { - return nil, false, resolveErr - } - input.RedisID = resolvedID - } - - if input.S3ProfileID != "" { - if _, err := s.client.BackupS3Config.Query(). - Where(backups3config.ProfileIDEQ(input.S3ProfileID)). - First(ctx); err != nil { - return nil, false, err - } - } - now := time.Now() - - if strings.TrimSpace(input.IdempotencyKey) != "" { - existing, err := s.client.BackupJob.Query(). 
- Where( - backupjob.BackupTypeEQ(backupjob.BackupType(input.BackupType)), - backupjob.TriggeredByEQ(input.TriggeredBy), - backupjob.IdempotencyKeyEQ(strings.TrimSpace(input.IdempotencyKey)), - backupjob.CreatedAtGTE(now.Add(-idempotencyWindow)), - ). - Order(ent.Desc(backupjob.FieldCreatedAt), ent.Desc(backupjob.FieldID)). - First(ctx) - if err == nil { - return existing, false, nil - } - if !ent.IsNotFound(err) { - return nil, false, err - } - } - - jobID := generateJobID(now) - builder := s.client.BackupJob.Create(). - SetJobID(jobID). - SetBackupType(backupjob.BackupType(input.BackupType)). - SetStatus(backupjob.StatusQueued). - SetTriggeredBy(strings.TrimSpace(input.TriggeredBy)). - SetUploadToS3(input.UploadToS3) - if input.S3ProfileID != "" { - builder.SetS3ProfileID(input.S3ProfileID) - } - if input.PostgresID != "" { - builder.SetPostgresProfileID(input.PostgresID) - } - if input.RedisID != "" { - builder.SetRedisProfileID(input.RedisID) - } - if strings.TrimSpace(input.IdempotencyKey) != "" { - builder.SetIdempotencyKey(strings.TrimSpace(input.IdempotencyKey)) - } - - job, err := builder.Save(ctx) - if err != nil { - return nil, false, err - } - - _, _ = s.client.BackupJobEvent.Create(). - SetBackupJobID(job.ID). - SetEventType("state_change"). - SetMessage("job queued"). - Save(ctx) - - return job, true, nil -} - -func (s *Store) AcquireNextQueuedJob(ctx context.Context) (*ent.BackupJob, error) { - for { - job, err := s.client.BackupJob.Query(). - Where(backupjob.StatusEQ(backupjob.StatusQueued)). - Order(ent.Asc(backupjob.FieldCreatedAt), ent.Asc(backupjob.FieldID)). - First(ctx) - if err != nil { - return nil, err - } - - now := time.Now() - affected, err := s.client.BackupJob.Update(). - Where( - backupjob.IDEQ(job.ID), - backupjob.StatusEQ(backupjob.StatusQueued), - ). - SetStatus(backupjob.StatusRunning). - SetStartedAt(now). - ClearFinishedAt(). - ClearErrorMessage(). 
- Save(ctx) - if err != nil { - return nil, err - } - if affected == 0 { - // 并发下被其他 worker 抢占时继续重试下一条 queued 任务。 - continue - } - - updated, err := s.client.BackupJob.Query().Where(backupjob.IDEQ(job.ID)).First(ctx) - if err != nil { - return nil, err - } - - if err := s.appendJobEventByEntityID(ctx, updated.ID, backupjobevent.LevelInfo, "state_change", "job started", ""); err != nil { - return nil, err - } - return updated, nil - } -} - -func (s *Store) FinishBackupJob(ctx context.Context, input FinishBackupJobInput) (*ent.BackupJob, error) { - jobID := strings.TrimSpace(input.JobID) - if jobID == "" { - return nil, errors.New("job_id is required") - } - status, err := parseBackupStatus(strings.TrimSpace(input.Status)) - if err != nil { - return nil, err - } - - job, err := s.GetBackupJob(ctx, jobID) - if err != nil { - return nil, err - } - - updater := s.client.BackupJob.UpdateOneID(job.ID). - SetStatus(status). - SetFinishedAt(time.Now()) - if strings.TrimSpace(input.ErrorMessage) != "" { - updater.SetErrorMessage(strings.TrimSpace(input.ErrorMessage)) - } else { - updater.ClearErrorMessage() - } - if input.Artifact != nil { - updater.SetArtifactLocalPath(strings.TrimSpace(input.Artifact.LocalPath)) - updater.SetArtifactSha256(strings.TrimSpace(input.Artifact.SHA256)) - updater.SetNillableArtifactSizeBytes(&input.Artifact.SizeBytes) - } - if input.S3Object != nil { - updater.SetS3Bucket(strings.TrimSpace(input.S3Object.Bucket)) - updater.SetS3Key(strings.TrimSpace(input.S3Object.Key)) - updater.SetS3Etag(strings.TrimSpace(input.S3Object.ETag)) - } - updated, err := updater.Save(ctx) - if err != nil { - return nil, err - } - - eventLevel := backupjobevent.LevelInfo - if status == backupjob.StatusFailed { - eventLevel = backupjobevent.LevelError - } else if status == backupjob.StatusPartialSucceeded { - eventLevel = backupjobevent.LevelWarning - } - message := fmt.Sprintf("job finished: %s", status.String()) - if strings.TrimSpace(input.ErrorMessage) != "" { - 
message = message + ": " + strings.TrimSpace(input.ErrorMessage) - } - if err := s.appendJobEventByEntityID(ctx, updated.ID, eventLevel, "state_change", message, ""); err != nil { - return nil, err - } - return updated, nil -} - -func (s *Store) AppendJobEvent(ctx context.Context, jobID, level, eventType, message, payload string) error { - job, err := s.GetBackupJob(ctx, jobID) - if err != nil { - return err - } - lv, err := parseEventLevel(level) - if err != nil { - return err - } - return s.appendJobEventByEntityID( - ctx, - job.ID, - lv, - strings.TrimSpace(eventType), - strings.TrimSpace(message), - strings.TrimSpace(payload), - ) -} - -func (s *Store) RequeueRunningJobs(ctx context.Context) (int, error) { - jobs, err := s.client.BackupJob.Query(). - Where(backupjob.StatusEQ(backupjob.StatusRunning)). - All(ctx) - if err != nil { - return 0, err - } - if len(jobs) == 0 { - return 0, nil - } - - ids := make([]int, 0, len(jobs)) - for _, item := range jobs { - ids = append(ids, item.ID) - } - affected, err := s.client.BackupJob.Update(). - Where(backupjob.IDIn(ids...)). - SetStatus(backupjob.StatusQueued). - ClearFinishedAt(). - SetErrorMessage("job requeued after backupd restart"). - Save(ctx) - if err != nil { - return 0, err - } - - for _, item := range jobs { - _ = s.appendJobEventByEntityID( - ctx, - item.ID, - backupjobevent.LevelWarning, - "state_change", - "job requeued after backupd restart", - "", - ) - } - return affected, nil -} - -func (s *Store) ListFinishedJobsForRetention(ctx context.Context) ([]*ent.BackupJob, error) { - return s.client.BackupJob.Query(). - Where( - backupjob.Or( - backupjob.StatusEQ(backupjob.StatusSucceeded), - backupjob.StatusEQ(backupjob.StatusPartialSucceeded), - ), - backupjob.ArtifactLocalPathNEQ(""), - ). - Order(ent.Desc(backupjob.FieldFinishedAt), ent.Desc(backupjob.FieldID)). 
- All(ctx) -} - -func (s *Store) ListBackupJobs(ctx context.Context, input ListBackupJobsInput) (*ListBackupJobsOutput, error) { - pageSize := int(input.PageSize) - if pageSize <= 0 { - pageSize = 20 - } - if pageSize > 200 { - pageSize = 200 - } - offset, err := decodePageToken(input.PageToken) - if err != nil { - return nil, err - } - - query := s.client.BackupJob.Query() - if strings.TrimSpace(input.Status) != "" { - query = query.Where(backupjob.StatusEQ(backupjob.Status(strings.TrimSpace(input.Status)))) - } - if strings.TrimSpace(input.BackupType) != "" { - query = query.Where(backupjob.BackupTypeEQ(backupjob.BackupType(strings.TrimSpace(input.BackupType)))) - } - - items, err := query. - Order(ent.Desc(backupjob.FieldCreatedAt), ent.Desc(backupjob.FieldID)). - Offset(offset). - Limit(pageSize). - All(ctx) - if err != nil { - return nil, err - } - - nextToken := "" - if len(items) == pageSize { - nextToken = encodePageToken(offset + len(items)) - } - return &ListBackupJobsOutput{Items: items, NextPageToken: nextToken}, nil -} - -func (s *Store) GetBackupJob(ctx context.Context, jobID string) (*ent.BackupJob, error) { - return s.client.BackupJob.Query().Where(backupjob.JobIDEQ(strings.TrimSpace(jobID))).First(ctx) -} - -func (s *Store) getSourceConfigEntity(ctx context.Context, sourceType, profileID string) (*ent.BackupSourceConfig, error) { - enumType, err := parseSourceType(sourceType) - if err != nil { - return nil, err - } - normalizedProfileID := strings.TrimSpace(profileID) - if normalizedProfileID == "" { - return nil, ErrSourceIDRequired - } - return s.client.BackupSourceConfig.Query(). - Where( - backupsourceconfig.SourceTypeEQ(enumType), - backupsourceconfig.ProfileIDEQ(normalizedProfileID), - ). 
- First(ctx) -} - -func (s *Store) getActiveSourceConfigEntity(ctx context.Context, sourceType string) (*ent.BackupSourceConfig, error) { - enumType, err := parseSourceType(sourceType) - if err != nil { - return nil, err - } - entity, err := s.client.BackupSourceConfig.Query(). - Where( - backupsourceconfig.SourceTypeEQ(enumType), - backupsourceconfig.IsActiveEQ(true), - ). - Order(ent.Asc(backupsourceconfig.FieldID)). - First(ctx) - if err == nil { - return entity, nil - } - if !ent.IsNotFound(err) { - return nil, err - } - return s.client.BackupSourceConfig.Query(). - Where(backupsourceconfig.SourceTypeEQ(enumType)). - Order(ent.Asc(backupsourceconfig.FieldID)). - First(ctx) -} - -func (s *Store) getActiveS3ConfigEntity(ctx context.Context) (*ent.BackupS3Config, error) { - entity, err := s.client.BackupS3Config.Query(). - Where(backups3config.IsActiveEQ(true)). - Order(ent.Asc(backups3config.FieldID)). - First(ctx) - if err == nil { - return entity, nil - } - if !ent.IsNotFound(err) { - return nil, err - } - return s.client.BackupS3Config.Query().Order(ent.Asc(backups3config.FieldID)).First(ctx) -} - -func toS3ProfileSnapshot(entity *ent.BackupS3Config) *S3ProfileSnapshot { - if entity == nil { - return &S3ProfileSnapshot{} - } - return &S3ProfileSnapshot{ - ProfileID: entity.ProfileID, - Name: entity.Name, - IsActive: entity.IsActive, - S3: S3Config{ - Enabled: entity.Enabled, - Endpoint: entity.Endpoint, - Region: entity.Region, - Bucket: entity.Bucket, - AccessKeyID: entity.AccessKeyID, - SecretAccessKey: entity.SecretAccessKeyEncrypted, - Prefix: entity.Prefix, - ForcePathStyle: entity.ForcePathStyle, - UseSSL: entity.UseSsl, - }, - SecretAccessKeyConfigured: strings.TrimSpace(entity.SecretAccessKeyEncrypted) != "", - CreatedAt: entity.CreatedAt, - UpdatedAt: entity.UpdatedAt, - } -} - -func toSourceProfileSnapshot(entity *ent.BackupSourceConfig) *SourceProfileSnapshot { - if entity == nil { - return &SourceProfileSnapshot{} - } - config := SourceConfig{ - 
Host: entity.Host, - Port: int32(nillableInt(entity.Port)), - User: entity.Username, - Username: entity.Username, - Password: entity.PasswordEncrypted, - Database: entity.Database, - SSLMode: entity.SslMode, - Addr: entity.Addr, - DB: int32(nillableInt(entity.RedisDb)), - ContainerName: entity.ContainerName, - } - return &SourceProfileSnapshot{ - SourceType: entity.SourceType.String(), - ProfileID: entity.ProfileID, - Name: entity.Name, - IsActive: entity.IsActive, - Config: config, - PasswordConfigured: strings.TrimSpace(entity.PasswordEncrypted) != "", - CreatedAt: entity.CreatedAt, - UpdatedAt: entity.UpdatedAt, - } -} - -func (s *Store) appendJobEventByEntityID(ctx context.Context, backupJobID int, level backupjobevent.Level, eventType, message, payload string) error { - eventBuilder := s.client.BackupJobEvent.Create(). - SetBackupJobID(backupJobID). - SetLevel(level). - SetEventType(defaultIfBlank(eventType, "state_change")). - SetMessage(defaultIfBlank(message, "event")) - if strings.TrimSpace(payload) != "" { - eventBuilder.SetPayload(strings.TrimSpace(payload)) - } - _, err := eventBuilder.Save(ctx) - return err -} - -func (s *Store) updateSourceConfigTx(ctx context.Context, tx *ent.Tx, sourceType, profileID string, cfg SourceConfig) error { - entity, enumType, err := s.resolveSourceEntityTx(ctx, tx, sourceType, profileID) - if err != nil { - return err - } - - updater := tx.BackupSourceConfig.UpdateOneID(entity.ID) - applySourceConfigUpdate(updater, enumType, cfg) - _, err = updater.Save(ctx) - return err -} - -func (s *Store) resolveSourceEntityTx( - ctx context.Context, - tx *ent.Tx, - sourceType, - profileID string, -) (*ent.BackupSourceConfig, backupsourceconfig.SourceType, error) { - enumType, err := parseSourceType(sourceType) - if err != nil { - return nil, "", err - } - normalizedProfileID := strings.TrimSpace(profileID) - if normalizedProfileID != "" { - entity, queryErr := tx.BackupSourceConfig.Query(). 
- Where( - backupsourceconfig.SourceTypeEQ(enumType), - backupsourceconfig.ProfileIDEQ(normalizedProfileID), - ). - First(ctx) - return entity, enumType, queryErr - } - - entity, queryErr := tx.BackupSourceConfig.Query(). - Where( - backupsourceconfig.SourceTypeEQ(enumType), - backupsourceconfig.IsActiveEQ(true), - ). - Order(ent.Asc(backupsourceconfig.FieldID)). - First(ctx) - if queryErr == nil { - return entity, enumType, nil - } - if !ent.IsNotFound(queryErr) { - return nil, "", queryErr - } - entity, queryErr = tx.BackupSourceConfig.Query(). - Where(backupsourceconfig.SourceTypeEQ(enumType)). - Order(ent.Asc(backupsourceconfig.FieldID)). - First(ctx) - return entity, enumType, queryErr -} - -func applySourceConfigCreate(builder *ent.BackupSourceConfigCreate, sourceType backupsourceconfig.SourceType, cfg SourceConfig) { - applySourceConfigCore(sourceType, cfg, func(host, username, database, sslMode, addr, containerName string, port, redisDB *int) { - builder. - SetHost(host). - SetUsername(username). - SetDatabase(database). - SetSslMode(sslMode). - SetAddr(addr). - SetContainerName(containerName) - if port != nil { - builder.SetPort(*port) - } - if redisDB != nil { - builder.SetRedisDb(*redisDB) - } - }) - if password := strings.TrimSpace(cfg.Password); password != "" { - builder.SetPasswordEncrypted(password) - } -} - -func applySourceConfigUpdate(builder *ent.BackupSourceConfigUpdateOne, sourceType backupsourceconfig.SourceType, cfg SourceConfig) { - applySourceConfigCore(sourceType, cfg, func(host, username, database, sslMode, addr, containerName string, port, redisDB *int) { - builder. - SetHost(host). - SetUsername(username). - SetDatabase(database). - SetSslMode(sslMode). - SetAddr(addr). 
- SetContainerName(containerName) - if port != nil { - builder.SetPort(*port) - } else { - builder.ClearPort() - } - if redisDB != nil { - builder.SetRedisDb(*redisDB) - } else { - builder.ClearRedisDb() - } - }) - if password := strings.TrimSpace(cfg.Password); password != "" { - builder.SetPasswordEncrypted(password) - } -} - -func applySourceConfigCore( - sourceType backupsourceconfig.SourceType, - cfg SourceConfig, - apply func(host, username, database, sslMode, addr, containerName string, port, redisDB *int), -) { - host := strings.TrimSpace(cfg.Host) - username := strings.TrimSpace(cfg.User) - if username == "" { - username = strings.TrimSpace(cfg.Username) - } - database := strings.TrimSpace(cfg.Database) - sslMode := strings.TrimSpace(cfg.SSLMode) - addr := strings.TrimSpace(cfg.Addr) - containerName := strings.TrimSpace(cfg.ContainerName) - - var portPtr *int - if cfg.Port > 0 { - portValue := int(cfg.Port) - portPtr = &portValue - } - var redisDBPtr *int - if cfg.DB >= 0 { - dbValue := int(cfg.DB) - redisDBPtr = &dbValue - } - - switch sourceType { - case backupsourceconfig.SourceTypePostgres: - if host == "" { - host = "127.0.0.1" - } - if username == "" { - username = "postgres" - } - if database == "" { - database = "sub2api" - } - if sslMode == "" { - sslMode = "disable" - } - if portPtr == nil { - portValue := 5432 - portPtr = &portValue - } - case backupsourceconfig.SourceTypeRedis: - if addr == "" { - addr = "127.0.0.1:6379" - } - if redisDBPtr == nil { - dbValue := 0 - redisDBPtr = &dbValue - } - } - - apply(host, username, database, sslMode, addr, containerName, portPtr, redisDBPtr) -} - -func (s *Store) ensureDefaults(ctx context.Context) error { - if _, err := s.client.BackupSetting.Query().First(ctx); err != nil { - if !ent.IsNotFound(err) { - return err - } - if _, err := s.client.BackupSetting.Create(). - SetSourceMode(backupsetting.SourceModeDirect). - SetBackupRoot("/var/lib/sub2api/backups"). - SetRetentionDays(7). - SetKeepLast(30). 
- SetSqlitePath(s.sqlitePath). - Save(ctx); err != nil { - return err - } - } - - if err := s.ensureSourceDefaultsByType(ctx, backupsourceconfig.SourceTypePostgres, "默认 PostgreSQL", SourceConfig{ - Host: "127.0.0.1", - Port: 5432, - User: "postgres", - Database: "sub2api", - SSLMode: "disable", - }); err != nil { - return err - } - if err := s.ensureSourceDefaultsByType(ctx, backupsourceconfig.SourceTypeRedis, "默认 Redis", SourceConfig{ - Addr: "127.0.0.1:6379", - DB: 0, - }); err != nil { - return err - } - - profiles, err := s.client.BackupS3Config.Query(). - Order(ent.Asc(backups3config.FieldID)). - All(ctx) - if err != nil { - return err - } - if len(profiles) == 0 { - _, err = s.client.BackupS3Config.Create(). - SetProfileID(defaultS3ProfileID). - SetName("默认账号"). - SetIsActive(true). - SetEnabled(false). - SetEndpoint(""). - SetRegion(""). - SetBucket(""). - SetAccessKeyID(""). - SetPrefix(""). - SetForcePathStyle(false). - SetUseSsl(true). - Save(ctx) - return err - } - - used := make(map[string]struct{}, len(profiles)) - normalizedIDs := make([]string, len(profiles)) - normalizedNames := make([]string, len(profiles)) - activeIndex := -1 - needFix := false - - for idx, profile := range profiles { - profileID := strings.TrimSpace(profile.ProfileID) - if profileID == "" { - needFix = true - if idx == 0 { - profileID = defaultS3ProfileID - } else { - profileID = fmt.Sprintf("profile-%d", profile.ID) - } - } - base := profileID - seq := 2 - for { - if _, exists := used[profileID]; !exists { - break - } - needFix = true - profileID = fmt.Sprintf("%s-%d", base, seq) - seq++ - } - used[profileID] = struct{}{} - normalizedIDs[idx] = profileID - - name := strings.TrimSpace(profile.Name) - if name == "" { - needFix = true - name = profileID - } - normalizedNames[idx] = name - - if profile.IsActive { - if activeIndex == -1 { - activeIndex = idx - } else { - needFix = true - } - } - } - if activeIndex == -1 { - needFix = true - activeIndex = 0 - } - if !needFix { - 
return nil - } - - tx, err := s.client.Tx(ctx) - if err != nil { - return err - } - for idx, profile := range profiles { - changed := false - updater := tx.BackupS3Config.UpdateOneID(profile.ID) - - if profile.ProfileID != normalizedIDs[idx] { - updater.SetProfileID(normalizedIDs[idx]) - changed = true - } - if strings.TrimSpace(profile.Name) != normalizedNames[idx] { - updater.SetName(normalizedNames[idx]) - changed = true - } - shouldActive := idx == activeIndex - if profile.IsActive != shouldActive { - updater.SetIsActive(shouldActive) - changed = true - } - if !changed { - continue - } - if _, err = updater.Save(ctx); err != nil { - _ = tx.Rollback() - return err - } - } - if err = tx.Commit(); err != nil { - return err - } - return nil -} - -func (s *Store) ensureSourceDefaultsByType( - ctx context.Context, - sourceType backupsourceconfig.SourceType, - defaultName string, - defaultCfg SourceConfig, -) error { - items, err := s.client.BackupSourceConfig.Query(). - Where(backupsourceconfig.SourceTypeEQ(sourceType)). - Order(ent.Asc(backupsourceconfig.FieldID)). - All(ctx) - if err != nil { - return err - } - - if len(items) == 0 { - builder := s.client.BackupSourceConfig.Create(). - SetSourceType(sourceType). - SetProfileID(defaultSourceID). - SetName(defaultName). 
- SetIsActive(true) - applySourceConfigCreate(builder, sourceType, defaultCfg) - _, err = builder.Save(ctx) - return err - } - - used := make(map[string]struct{}, len(items)) - normalizedIDs := make([]string, len(items)) - normalizedNames := make([]string, len(items)) - activeIndex := -1 - needFix := false - - for idx, item := range items { - profileID := strings.TrimSpace(item.ProfileID) - if profileID == "" { - needFix = true - if idx == 0 { - profileID = defaultSourceID - } else { - profileID = fmt.Sprintf("profile-%d", item.ID) - } - } - base := profileID - seq := 2 - for { - if _, exists := used[profileID]; !exists { - break - } - needFix = true - profileID = fmt.Sprintf("%s-%d", base, seq) - seq++ - } - used[profileID] = struct{}{} - normalizedIDs[idx] = profileID - - name := strings.TrimSpace(item.Name) - if name == "" { - needFix = true - name = profileID - } - normalizedNames[idx] = name - - if item.IsActive { - if activeIndex == -1 { - activeIndex = idx - } else { - needFix = true - } - } - } - if activeIndex == -1 { - needFix = true - activeIndex = 0 - } - if !needFix { - return nil - } - - tx, err := s.client.Tx(ctx) - if err != nil { - return err - } - for idx, item := range items { - changed := false - updater := tx.BackupSourceConfig.UpdateOneID(item.ID) - - if item.ProfileID != normalizedIDs[idx] { - updater.SetProfileID(normalizedIDs[idx]) - changed = true - } - if strings.TrimSpace(item.Name) != normalizedNames[idx] { - updater.SetName(normalizedNames[idx]) - changed = true - } - shouldActive := idx == activeIndex - if item.IsActive != shouldActive { - updater.SetIsActive(shouldActive) - changed = true - } - if !changed { - continue - } - if _, err = updater.Save(ctx); err != nil { - _ = tx.Rollback() - return err - } - } - if err = tx.Commit(); err != nil { - return err - } - return nil -} - -func parseSourceType(sourceType string) (backupsourceconfig.SourceType, error) { - switch strings.TrimSpace(sourceType) { - case 
backupsourceconfig.SourceTypePostgres.String(): - return backupsourceconfig.SourceTypePostgres, nil - case backupsourceconfig.SourceTypeRedis.String(): - return backupsourceconfig.SourceTypeRedis, nil - default: - return "", ErrSourceTypeInvalid - } -} - -func (s *Store) resolveSourceProfileID(ctx context.Context, sourceType, requestedProfileID string) (string, error) { - requestedProfileID = strings.TrimSpace(requestedProfileID) - if requestedProfileID != "" { - entity, err := s.getSourceConfigEntity(ctx, sourceType, requestedProfileID) - if err != nil { - return "", err - } - return entity.ProfileID, nil - } - - entity, err := s.getActiveSourceConfigEntity(ctx, sourceType) - if err != nil { - return "", err - } - return strings.TrimSpace(entity.ProfileID), nil -} - -func backupTypeNeedsPostgres(backupType string) bool { - switch strings.TrimSpace(backupType) { - case backupjob.BackupTypePostgres.String(), backupjob.BackupTypeFull.String(): - return true - default: - return false - } -} - -func backupTypeNeedsRedis(backupType string) bool { - switch strings.TrimSpace(backupType) { - case backupjob.BackupTypeRedis.String(), backupjob.BackupTypeFull.String(): - return true - default: - return false - } -} - -func normalizeSQLitePath(sqlitePath string) string { - path := strings.TrimSpace(sqlitePath) - if path == "" { - return defaultSQLitePath - } - return path -} - -func sqliteDSN(path string) string { - dsn := path - if !strings.HasPrefix(path, "file:") { - dsn = "file:" + path - } - - params := make([]string, 0, 2) - if !strings.Contains(dsn, "_fk=1") { - params = append(params, "_fk=1") - } - if !strings.Contains(dsn, "_pragma=foreign_keys(1)") && !strings.Contains(dsn, "_pragma=foreign_keys%281%29") { - params = append(params, "_pragma=foreign_keys(1)") - } - if len(params) == 0 { - return dsn - } - separator := "?" 
- if strings.Contains(dsn, "?") { - separator = "&" - } - return dsn + separator + strings.Join(params, "&") -} - -func nillableInt(v *int) int { - if v == nil { - return 0 - } - return *v -} - -func generateJobID(now time.Time) string { - timestamp := now.UTC().Format("20060102_150405") - suffix := strconv.FormatInt(now.UnixNano()%0xffffff, 16) - if len(suffix) < 6 { - suffix = strings.Repeat("0", 6-len(suffix)) + suffix - } - return fmt.Sprintf("bk_%s_%s", timestamp, suffix) -} - -func decodePageToken(token string) (int, error) { - t := strings.TrimSpace(token) - if t == "" { - return 0, nil - } - raw, err := base64.StdEncoding.DecodeString(t) - if err != nil { - return 0, err - } - offset, err := strconv.Atoi(string(raw)) - if err != nil { - return 0, err - } - if offset < 0 { - return 0, errors.New("negative page token") - } - return offset, nil -} - -func encodePageToken(offset int) string { - if offset <= 0 { - return "" - } - return base64.StdEncoding.EncodeToString([]byte(strconv.Itoa(offset))) -} - -func parseBackupStatus(v string) (backupjob.Status, error) { - switch strings.TrimSpace(v) { - case backupjob.StatusQueued.String(): - return backupjob.StatusQueued, nil - case backupjob.StatusRunning.String(): - return backupjob.StatusRunning, nil - case backupjob.StatusSucceeded.String(): - return backupjob.StatusSucceeded, nil - case backupjob.StatusFailed.String(): - return backupjob.StatusFailed, nil - case backupjob.StatusPartialSucceeded.String(): - return backupjob.StatusPartialSucceeded, nil - default: - return "", fmt.Errorf("invalid backup status: %s", v) - } -} - -func parseEventLevel(v string) (backupjobevent.Level, error) { - switch strings.TrimSpace(v) { - case "", backupjobevent.LevelInfo.String(): - return backupjobevent.LevelInfo, nil - case backupjobevent.LevelWarning.String(): - return backupjobevent.LevelWarning, nil - case backupjobevent.LevelError.String(): - return backupjobevent.LevelError, nil - default: - return "", 
fmt.Errorf("invalid event level: %s", v) - } -} - -func defaultIfBlank(v, fallback string) string { - trimmed := strings.TrimSpace(v) - if trimmed == "" { - return fallback - } - return trimmed -} diff --git a/backup/internal/store/entstore/store_test.go b/backup/internal/store/entstore/store_test.go deleted file mode 100644 index e676b0dea..000000000 --- a/backup/internal/store/entstore/store_test.go +++ /dev/null @@ -1,378 +0,0 @@ -package entstore - -import ( - "context" - "errors" - "path/filepath" - "testing" - "time" - - "github.com/Wei-Shaw/sub2api/backup/ent/backupjob" - "github.com/stretchr/testify/require" -) - -func TestStore_CreateAcquireFinishBackupJob(t *testing.T) { - store := openTestStore(t) - - job, created, err := store.CreateBackupJob(context.Background(), CreateBackupJobInput{ - BackupType: backupjob.BackupTypePostgres.String(), - UploadToS3: true, - TriggeredBy: "admin:1", - IdempotencyKey: "idem-1", - }) - require.NoError(t, err) - require.True(t, created) - require.Equal(t, backupjob.StatusQueued, job.Status) - - acquired, err := store.AcquireNextQueuedJob(context.Background()) - require.NoError(t, err) - require.Equal(t, job.JobID, acquired.JobID) - require.Equal(t, backupjob.StatusRunning, acquired.Status) - require.NotNil(t, acquired.StartedAt) - - size := int64(1024) - finished, err := store.FinishBackupJob(context.Background(), FinishBackupJobInput{ - JobID: acquired.JobID, - Status: backupjob.StatusSucceeded.String(), - Artifact: &BackupArtifactSnapshot{ - LocalPath: "/tmp/demo/bundle.tar.gz", - SizeBytes: size, - SHA256: "sha256-demo", - }, - S3Object: &BackupS3ObjectSnapshot{ - Bucket: "bucket-demo", - Key: "demo/key", - ETag: "etag-demo", - }, - }) - require.NoError(t, err) - require.Equal(t, backupjob.StatusSucceeded, finished.Status) - require.NotNil(t, finished.FinishedAt) - require.Equal(t, "/tmp/demo/bundle.tar.gz", finished.ArtifactLocalPath) - require.Equal(t, "sha256-demo", finished.ArtifactSha256) - require.NotNil(t, 
finished.ArtifactSizeBytes) - require.Equal(t, size, *finished.ArtifactSizeBytes) - require.Equal(t, "bucket-demo", finished.S3Bucket) - require.Equal(t, "demo/key", finished.S3Key) -} - -func TestStore_CreateBackupJob_Idempotency(t *testing.T) { - store := openTestStore(t) - - first, created, err := store.CreateBackupJob(context.Background(), CreateBackupJobInput{ - BackupType: backupjob.BackupTypeRedis.String(), - UploadToS3: false, - TriggeredBy: "admin:2", - IdempotencyKey: "idem-same", - }) - require.NoError(t, err) - require.True(t, created) - - second, created, err := store.CreateBackupJob(context.Background(), CreateBackupJobInput{ - BackupType: backupjob.BackupTypeRedis.String(), - UploadToS3: false, - TriggeredBy: "admin:2", - IdempotencyKey: "idem-same", - }) - require.NoError(t, err) - require.False(t, created) - require.Equal(t, first.JobID, second.JobID) -} - -func TestStore_RequeueRunningJobs(t *testing.T) { - store := openTestStore(t) - - _, _, err := store.CreateBackupJob(context.Background(), CreateBackupJobInput{ - BackupType: backupjob.BackupTypeFull.String(), - UploadToS3: false, - TriggeredBy: "admin:3", - }) - require.NoError(t, err) - - acquired, err := store.AcquireNextQueuedJob(context.Background()) - require.NoError(t, err) - require.Equal(t, backupjob.StatusRunning, acquired.Status) - - count, err := store.RequeueRunningJobs(context.Background()) - require.NoError(t, err) - require.Equal(t, 1, count) - - job, err := store.GetBackupJob(context.Background(), acquired.JobID) - require.NoError(t, err) - require.Equal(t, backupjob.StatusQueued, job.Status) - require.Equal(t, "job requeued after backupd restart", job.ErrorMessage) -} - -func TestStore_UpdateConfig_KeepSecretWhenEmpty(t *testing.T) { - store := openTestStore(t) - ctx := context.Background() - - cfg, err := store.GetConfig(ctx) - require.NoError(t, err) - cfg.SourceMode = "direct" - cfg.BackupRoot = filepath.Join(t.TempDir(), "backups") - cfg.SQLitePath = 
filepath.Join(t.TempDir(), "meta.db") - cfg.RetentionDays = 7 - cfg.KeepLast = 30 - cfg.Postgres.Password = "pg-secret" - cfg.Redis.Password = "redis-secret" - cfg.S3.SecretAccessKey = "s3-secret" - cfg.S3.Region = "us-east-1" - cfg.S3.Bucket = "demo-bucket" - cfg.S3.AccessKeyID = "demo-ak" - _, err = store.UpdateConfig(ctx, *cfg) - require.NoError(t, err) - - cfg2, err := store.GetConfig(ctx) - require.NoError(t, err) - cfg2.Postgres.Password = "" - cfg2.Redis.Password = "" - cfg2.S3.SecretAccessKey = "" - _, err = store.UpdateConfig(ctx, *cfg2) - require.NoError(t, err) - - finalCfg, err := store.GetConfig(ctx) - require.NoError(t, err) - require.Equal(t, "pg-secret", finalCfg.Postgres.Password) - require.Equal(t, "redis-secret", finalCfg.Redis.Password) - require.Equal(t, "s3-secret", finalCfg.S3.SecretAccessKey) -} - -func TestStore_S3ProfilesLifecycle(t *testing.T) { - store := openTestStore(t) - ctx := context.Background() - - initialProfiles, err := store.ListS3Profiles(ctx) - require.NoError(t, err) - require.Len(t, initialProfiles, 1) - require.Equal(t, defaultS3ProfileID, initialProfiles[0].ProfileID) - require.True(t, initialProfiles[0].IsActive) - - created, err := store.CreateS3Profile(ctx, CreateS3ProfileInput{ - ProfileID: "archive", - Name: "归档账号", - S3: S3Config{ - Enabled: true, - Region: "us-east-1", - Bucket: "archive-bucket", - AccessKeyID: "archive-ak", - SecretAccessKey: "archive-sk", - UseSSL: true, - }, - SetActive: false, - }) - require.NoError(t, err) - require.Equal(t, "archive", created.ProfileID) - require.False(t, created.IsActive) - require.True(t, created.SecretAccessKeyConfigured) - - updated, err := store.UpdateS3Profile(ctx, UpdateS3ProfileInput{ - ProfileID: "archive", - Name: "归档账号-更新", - S3: S3Config{ - Enabled: true, - Region: "us-east-1", - Bucket: "archive-bucket-updated", - AccessKeyID: "archive-ak-2", - SecretAccessKey: "", - UseSSL: true, - }, - }) - require.NoError(t, err) - require.Equal(t, "归档账号-更新", updated.Name) - 
require.Equal(t, "archive-ak-2", updated.S3.AccessKeyID) - require.Equal(t, "archive-sk", updated.S3.SecretAccessKey) - - active, err := store.SetActiveS3Profile(ctx, "archive") - require.NoError(t, err) - require.True(t, active.IsActive) - - cfg, err := store.GetConfig(ctx) - require.NoError(t, err) - require.Equal(t, "archive", cfg.ActiveS3ProfileID) - require.Equal(t, "archive-bucket-updated", cfg.S3.Bucket) - - err = store.DeleteS3Profile(ctx, "archive") - require.Error(t, err) - require.True(t, errors.Is(err, ErrActiveS3Profile)) - - _, err = store.SetActiveS3Profile(ctx, defaultS3ProfileID) - require.NoError(t, err) - require.NoError(t, store.DeleteS3Profile(ctx, "archive")) -} - -func TestStore_DeleteS3ProfileInUse(t *testing.T) { - store := openTestStore(t) - ctx := context.Background() - - _, err := store.CreateS3Profile(ctx, CreateS3ProfileInput{ - ProfileID: "for-job", - Name: "任务账号", - S3: S3Config{ - Enabled: true, - Region: "us-east-1", - Bucket: "job-bucket", - UseSSL: true, - }, - }) - require.NoError(t, err) - - _, _, err = store.CreateBackupJob(ctx, CreateBackupJobInput{ - BackupType: backupjob.BackupTypePostgres.String(), - UploadToS3: true, - TriggeredBy: "admin:9", - S3ProfileID: "for-job", - }) - require.NoError(t, err) - - err = store.DeleteS3Profile(ctx, "for-job") - require.Error(t, err) - require.True(t, errors.Is(err, ErrS3ProfileInUse)) -} - -func TestStore_SourceProfilesLifecycle(t *testing.T) { - store := openTestStore(t) - ctx := context.Background() - - initialPG, err := store.ListSourceProfiles(ctx, "postgres") - require.NoError(t, err) - require.Len(t, initialPG, 1) - require.Equal(t, defaultSourceID, initialPG[0].ProfileID) - require.True(t, initialPG[0].IsActive) - - created, err := store.CreateSourceProfile(ctx, CreateSourceProfileInput{ - SourceType: "postgres", - ProfileID: "pg-reporting", - Name: "报表库", - Config: SourceConfig{ - Host: "10.0.0.10", - Port: 15432, - User: "report_user", - Password: "secret", - Database: 
"reporting", - SSLMode: "require", - }, - SetActive: false, - }) - require.NoError(t, err) - require.Equal(t, "pg-reporting", created.ProfileID) - require.False(t, created.IsActive) - require.True(t, created.PasswordConfigured) - - active, err := store.SetActiveSourceProfile(ctx, "postgres", "pg-reporting") - require.NoError(t, err) - require.True(t, active.IsActive) - - cfg, err := store.GetConfig(ctx) - require.NoError(t, err) - require.Equal(t, "pg-reporting", cfg.ActivePostgresID) - require.Equal(t, "10.0.0.10", cfg.Postgres.Host) - require.Equal(t, int32(15432), cfg.Postgres.Port) - - err = store.DeleteSourceProfile(ctx, "postgres", "pg-reporting") - require.Error(t, err) - require.True(t, errors.Is(err, ErrSourceActive)) - - _, err = store.SetActiveSourceProfile(ctx, "postgres", defaultSourceID) - require.NoError(t, err) - require.NoError(t, store.DeleteSourceProfile(ctx, "postgres", "pg-reporting")) -} - -func TestStore_CreateBackupJob_WithSelectedSourceProfiles(t *testing.T) { - store := openTestStore(t) - ctx := context.Background() - - _, err := store.CreateSourceProfile(ctx, CreateSourceProfileInput{ - SourceType: "postgres", - ProfileID: "pg-custom", - Name: "自定义PG", - Config: SourceConfig{ - Host: "127.0.0.2", - Port: 6432, - User: "custom_user", - Database: "custom_db", - SSLMode: "disable", - }, - }) - require.NoError(t, err) - - _, err = store.CreateSourceProfile(ctx, CreateSourceProfileInput{ - SourceType: "redis", - ProfileID: "redis-custom", - Name: "自定义Redis", - Config: SourceConfig{ - Addr: "127.0.0.3:6380", - DB: 5, - }, - }) - require.NoError(t, err) - - job, created, err := store.CreateBackupJob(ctx, CreateBackupJobInput{ - BackupType: backupjob.BackupTypeFull.String(), - TriggeredBy: "admin:10", - PostgresID: "pg-custom", - RedisID: "redis-custom", - }) - require.NoError(t, err) - require.True(t, created) - require.Equal(t, "pg-custom", job.PostgresProfileID) - require.Equal(t, "redis-custom", job.RedisProfileID) -} - -func 
TestStore_CreateBackupJob_IgnoreUnusedProfilesAndS3(t *testing.T) { - store := openTestStore(t) - ctx := context.Background() - - pgJob, created, err := store.CreateBackupJob(ctx, CreateBackupJobInput{ - BackupType: backupjob.BackupTypePostgres.String(), - TriggeredBy: "admin:11", - RedisID: "redis-should-be-ignored", - }) - require.NoError(t, err) - require.True(t, created) - require.Empty(t, pgJob.RedisProfileID) - require.NotEmpty(t, pgJob.PostgresProfileID) - - redisJob, created, err := store.CreateBackupJob(ctx, CreateBackupJobInput{ - BackupType: backupjob.BackupTypeRedis.String(), - TriggeredBy: "admin:12", - PostgresID: "postgres-should-be-ignored", - }) - require.NoError(t, err) - require.True(t, created) - require.Empty(t, redisJob.PostgresProfileID) - require.NotEmpty(t, redisJob.RedisProfileID) - - noS3Job, created, err := store.CreateBackupJob(ctx, CreateBackupJobInput{ - BackupType: backupjob.BackupTypePostgres.String(), - TriggeredBy: "admin:13", - UploadToS3: false, - S3ProfileID: "missing-profile", - }) - require.NoError(t, err) - require.True(t, created) - require.Empty(t, noS3Job.S3ProfileID) - - _, _, err = store.CreateBackupJob(ctx, CreateBackupJobInput{ - BackupType: backupjob.BackupTypePostgres.String(), - TriggeredBy: "admin:14", - UploadToS3: true, - S3ProfileID: "missing-profile", - }) - require.Error(t, err) -} - -func openTestStore(t *testing.T) *Store { - t.Helper() - - dbPath := filepath.Join(t.TempDir(), "backupd-test-"+time.Now().Format("150405.000")+".db") - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - store, err := Open(ctx, dbPath) - require.NoError(t, err) - t.Cleanup(func() { - _ = store.Close() - }) - return store -} diff --git a/backup/proto/backup/v1/backup.pb.go b/backup/proto/backup/v1/backup.pb.go deleted file mode 100644 index 30e288ef5..000000000 --- a/backup/proto/backup/v1/backup.pb.go +++ /dev/null @@ -1,2877 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. 
-// versions: -// protoc-gen-go v1.36.10 -// protoc v6.32.1 -// source: proto/backup/v1/backup.proto - -package backupv1 - -import ( - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type HealthRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *HealthRequest) Reset() { - *x = HealthRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *HealthRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthRequest) ProtoMessage() {} - -func (x *HealthRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthRequest.ProtoReflect.Descriptor instead. 
-func (*HealthRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{0} -} - -type HealthResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Status string `protobuf:"bytes,1,opt,name=status,proto3" json:"status,omitempty"` - Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` - UptimeSeconds int64 `protobuf:"varint,3,opt,name=uptime_seconds,json=uptimeSeconds,proto3" json:"uptime_seconds,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *HealthResponse) Reset() { - *x = HealthResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *HealthResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*HealthResponse) ProtoMessage() {} - -func (x *HealthResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use HealthResponse.ProtoReflect.Descriptor instead. 
-func (*HealthResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{1} -} - -func (x *HealthResponse) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *HealthResponse) GetVersion() string { - if x != nil { - return x.Version - } - return "" -} - -func (x *HealthResponse) GetUptimeSeconds() int64 { - if x != nil { - return x.UptimeSeconds - } - return 0 -} - -type SourceConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host,omitempty"` - Port int32 `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"` - User string `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` - Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password,omitempty"` - Database string `protobuf:"bytes,5,opt,name=database,proto3" json:"database,omitempty"` - SslMode string `protobuf:"bytes,6,opt,name=ssl_mode,json=sslMode,proto3" json:"ssl_mode,omitempty"` - Addr string `protobuf:"bytes,7,opt,name=addr,proto3" json:"addr,omitempty"` - Username string `protobuf:"bytes,8,opt,name=username,proto3" json:"username,omitempty"` - Db int32 `protobuf:"varint,9,opt,name=db,proto3" json:"db,omitempty"` - ContainerName string `protobuf:"bytes,10,opt,name=container_name,json=containerName,proto3" json:"container_name,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SourceConfig) Reset() { - *x = SourceConfig{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[2] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SourceConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SourceConfig) ProtoMessage() {} - -func (x *SourceConfig) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[2] - if x != nil { - ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SourceConfig.ProtoReflect.Descriptor instead. -func (*SourceConfig) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{2} -} - -func (x *SourceConfig) GetHost() string { - if x != nil { - return x.Host - } - return "" -} - -func (x *SourceConfig) GetPort() int32 { - if x != nil { - return x.Port - } - return 0 -} - -func (x *SourceConfig) GetUser() string { - if x != nil { - return x.User - } - return "" -} - -func (x *SourceConfig) GetPassword() string { - if x != nil { - return x.Password - } - return "" -} - -func (x *SourceConfig) GetDatabase() string { - if x != nil { - return x.Database - } - return "" -} - -func (x *SourceConfig) GetSslMode() string { - if x != nil { - return x.SslMode - } - return "" -} - -func (x *SourceConfig) GetAddr() string { - if x != nil { - return x.Addr - } - return "" -} - -func (x *SourceConfig) GetUsername() string { - if x != nil { - return x.Username - } - return "" -} - -func (x *SourceConfig) GetDb() int32 { - if x != nil { - return x.Db - } - return 0 -} - -func (x *SourceConfig) GetContainerName() string { - if x != nil { - return x.ContainerName - } - return "" -} - -type S3Config struct { - state protoimpl.MessageState `protogen:"open.v1"` - Enabled bool `protobuf:"varint,1,opt,name=enabled,proto3" json:"enabled,omitempty"` - Endpoint string `protobuf:"bytes,2,opt,name=endpoint,proto3" json:"endpoint,omitempty"` - Region string `protobuf:"bytes,3,opt,name=region,proto3" json:"region,omitempty"` - Bucket string `protobuf:"bytes,4,opt,name=bucket,proto3" json:"bucket,omitempty"` - AccessKeyId string `protobuf:"bytes,5,opt,name=access_key_id,json=accessKeyId,proto3" json:"access_key_id,omitempty"` - SecretAccessKey string `protobuf:"bytes,6,opt,name=secret_access_key,json=secretAccessKey,proto3" 
json:"secret_access_key,omitempty"` - Prefix string `protobuf:"bytes,7,opt,name=prefix,proto3" json:"prefix,omitempty"` - ForcePathStyle bool `protobuf:"varint,8,opt,name=force_path_style,json=forcePathStyle,proto3" json:"force_path_style,omitempty"` - UseSsl bool `protobuf:"varint,9,opt,name=use_ssl,json=useSsl,proto3" json:"use_ssl,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *S3Config) Reset() { - *x = S3Config{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[3] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *S3Config) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*S3Config) ProtoMessage() {} - -func (x *S3Config) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[3] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use S3Config.ProtoReflect.Descriptor instead. 
-func (*S3Config) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{3} -} - -func (x *S3Config) GetEnabled() bool { - if x != nil { - return x.Enabled - } - return false -} - -func (x *S3Config) GetEndpoint() string { - if x != nil { - return x.Endpoint - } - return "" -} - -func (x *S3Config) GetRegion() string { - if x != nil { - return x.Region - } - return "" -} - -func (x *S3Config) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *S3Config) GetAccessKeyId() string { - if x != nil { - return x.AccessKeyId - } - return "" -} - -func (x *S3Config) GetSecretAccessKey() string { - if x != nil { - return x.SecretAccessKey - } - return "" -} - -func (x *S3Config) GetPrefix() string { - if x != nil { - return x.Prefix - } - return "" -} - -func (x *S3Config) GetForcePathStyle() bool { - if x != nil { - return x.ForcePathStyle - } - return false -} - -func (x *S3Config) GetUseSsl() bool { - if x != nil { - return x.UseSsl - } - return false -} - -type BackupConfig struct { - state protoimpl.MessageState `protogen:"open.v1"` - SourceMode string `protobuf:"bytes,1,opt,name=source_mode,json=sourceMode,proto3" json:"source_mode,omitempty"` - BackupRoot string `protobuf:"bytes,2,opt,name=backup_root,json=backupRoot,proto3" json:"backup_root,omitempty"` - SqlitePath string `protobuf:"bytes,3,opt,name=sqlite_path,json=sqlitePath,proto3" json:"sqlite_path,omitempty"` - RetentionDays int32 `protobuf:"varint,4,opt,name=retention_days,json=retentionDays,proto3" json:"retention_days,omitempty"` - KeepLast int32 `protobuf:"varint,5,opt,name=keep_last,json=keepLast,proto3" json:"keep_last,omitempty"` - Postgres *SourceConfig `protobuf:"bytes,6,opt,name=postgres,proto3" json:"postgres,omitempty"` - Redis *SourceConfig `protobuf:"bytes,7,opt,name=redis,proto3" json:"redis,omitempty"` - S3 *S3Config `protobuf:"bytes,8,opt,name=s3,proto3" json:"s3,omitempty"` - ActiveS3ProfileId string 
`protobuf:"bytes,9,opt,name=active_s3_profile_id,json=activeS3ProfileId,proto3" json:"active_s3_profile_id,omitempty"` - ActivePostgresProfileId string `protobuf:"bytes,10,opt,name=active_postgres_profile_id,json=activePostgresProfileId,proto3" json:"active_postgres_profile_id,omitempty"` - ActiveRedisProfileId string `protobuf:"bytes,11,opt,name=active_redis_profile_id,json=activeRedisProfileId,proto3" json:"active_redis_profile_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BackupConfig) Reset() { - *x = BackupConfig{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[4] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BackupConfig) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BackupConfig) ProtoMessage() {} - -func (x *BackupConfig) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[4] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BackupConfig.ProtoReflect.Descriptor instead. 
-func (*BackupConfig) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{4} -} - -func (x *BackupConfig) GetSourceMode() string { - if x != nil { - return x.SourceMode - } - return "" -} - -func (x *BackupConfig) GetBackupRoot() string { - if x != nil { - return x.BackupRoot - } - return "" -} - -func (x *BackupConfig) GetSqlitePath() string { - if x != nil { - return x.SqlitePath - } - return "" -} - -func (x *BackupConfig) GetRetentionDays() int32 { - if x != nil { - return x.RetentionDays - } - return 0 -} - -func (x *BackupConfig) GetKeepLast() int32 { - if x != nil { - return x.KeepLast - } - return 0 -} - -func (x *BackupConfig) GetPostgres() *SourceConfig { - if x != nil { - return x.Postgres - } - return nil -} - -func (x *BackupConfig) GetRedis() *SourceConfig { - if x != nil { - return x.Redis - } - return nil -} - -func (x *BackupConfig) GetS3() *S3Config { - if x != nil { - return x.S3 - } - return nil -} - -func (x *BackupConfig) GetActiveS3ProfileId() string { - if x != nil { - return x.ActiveS3ProfileId - } - return "" -} - -func (x *BackupConfig) GetActivePostgresProfileId() string { - if x != nil { - return x.ActivePostgresProfileId - } - return "" -} - -func (x *BackupConfig) GetActiveRedisProfileId() string { - if x != nil { - return x.ActiveRedisProfileId - } - return "" -} - -type GetConfigRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetConfigRequest) Reset() { - *x = GetConfigRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetConfigRequest) ProtoMessage() {} - -func (x *GetConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[5] - if x != 
nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetConfigRequest.ProtoReflect.Descriptor instead. -func (*GetConfigRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{5} -} - -type GetConfigResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetConfigResponse) Reset() { - *x = GetConfigResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[6] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetConfigResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetConfigResponse) ProtoMessage() {} - -func (x *GetConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[6] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetConfigResponse.ProtoReflect.Descriptor instead. 
-func (*GetConfigResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{6} -} - -func (x *GetConfigResponse) GetConfig() *BackupConfig { - if x != nil { - return x.Config - } - return nil -} - -type UpdateConfigRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UpdateConfigRequest) Reset() { - *x = UpdateConfigRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[7] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UpdateConfigRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateConfigRequest) ProtoMessage() {} - -func (x *UpdateConfigRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[7] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateConfigRequest.ProtoReflect.Descriptor instead. 
-func (*UpdateConfigRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{7} -} - -func (x *UpdateConfigRequest) GetConfig() *BackupConfig { - if x != nil { - return x.Config - } - return nil -} - -type UpdateConfigResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Config *BackupConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UpdateConfigResponse) Reset() { - *x = UpdateConfigResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UpdateConfigResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateConfigResponse) ProtoMessage() {} - -func (x *UpdateConfigResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[8] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateConfigResponse.ProtoReflect.Descriptor instead. 
-func (*UpdateConfigResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{8} -} - -func (x *UpdateConfigResponse) GetConfig() *BackupConfig { - if x != nil { - return x.Config - } - return nil -} - -type SourceProfile struct { - state protoimpl.MessageState `protogen:"open.v1"` - SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` - ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - IsActive bool `protobuf:"varint,4,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"` - Config *SourceConfig `protobuf:"bytes,5,opt,name=config,proto3" json:"config,omitempty"` - PasswordConfigured bool `protobuf:"varint,6,opt,name=password_configured,json=passwordConfigured,proto3" json:"password_configured,omitempty"` - CreatedAt string `protobuf:"bytes,7,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - UpdatedAt string `protobuf:"bytes,8,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SourceProfile) Reset() { - *x = SourceProfile{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[9] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SourceProfile) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SourceProfile) ProtoMessage() {} - -func (x *SourceProfile) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[9] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SourceProfile.ProtoReflect.Descriptor instead. 
-func (*SourceProfile) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{9} -} - -func (x *SourceProfile) GetSourceType() string { - if x != nil { - return x.SourceType - } - return "" -} - -func (x *SourceProfile) GetProfileId() string { - if x != nil { - return x.ProfileId - } - return "" -} - -func (x *SourceProfile) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *SourceProfile) GetIsActive() bool { - if x != nil { - return x.IsActive - } - return false -} - -func (x *SourceProfile) GetConfig() *SourceConfig { - if x != nil { - return x.Config - } - return nil -} - -func (x *SourceProfile) GetPasswordConfigured() bool { - if x != nil { - return x.PasswordConfigured - } - return false -} - -func (x *SourceProfile) GetCreatedAt() string { - if x != nil { - return x.CreatedAt - } - return "" -} - -func (x *SourceProfile) GetUpdatedAt() string { - if x != nil { - return x.UpdatedAt - } - return "" -} - -type ListSourceProfilesRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListSourceProfilesRequest) Reset() { - *x = ListSourceProfilesRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[10] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListSourceProfilesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListSourceProfilesRequest) ProtoMessage() {} - -func (x *ListSourceProfilesRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[10] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use 
ListSourceProfilesRequest.ProtoReflect.Descriptor instead. -func (*ListSourceProfilesRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{10} -} - -func (x *ListSourceProfilesRequest) GetSourceType() string { - if x != nil { - return x.SourceType - } - return "" -} - -type ListSourceProfilesResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Items []*SourceProfile `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListSourceProfilesResponse) Reset() { - *x = ListSourceProfilesResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[11] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListSourceProfilesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListSourceProfilesResponse) ProtoMessage() {} - -func (x *ListSourceProfilesResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[11] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListSourceProfilesResponse.ProtoReflect.Descriptor instead. 
-func (*ListSourceProfilesResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{11} -} - -func (x *ListSourceProfilesResponse) GetItems() []*SourceProfile { - if x != nil { - return x.Items - } - return nil -} - -type CreateSourceProfileRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` - ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Config *SourceConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` - SetActive bool `protobuf:"varint,5,opt,name=set_active,json=setActive,proto3" json:"set_active,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateSourceProfileRequest) Reset() { - *x = CreateSourceProfileRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[12] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateSourceProfileRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateSourceProfileRequest) ProtoMessage() {} - -func (x *CreateSourceProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[12] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateSourceProfileRequest.ProtoReflect.Descriptor instead. 
-func (*CreateSourceProfileRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{12} -} - -func (x *CreateSourceProfileRequest) GetSourceType() string { - if x != nil { - return x.SourceType - } - return "" -} - -func (x *CreateSourceProfileRequest) GetProfileId() string { - if x != nil { - return x.ProfileId - } - return "" -} - -func (x *CreateSourceProfileRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *CreateSourceProfileRequest) GetConfig() *SourceConfig { - if x != nil { - return x.Config - } - return nil -} - -func (x *CreateSourceProfileRequest) GetSetActive() bool { - if x != nil { - return x.SetActive - } - return false -} - -type CreateSourceProfileResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Profile *SourceProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateSourceProfileResponse) Reset() { - *x = CreateSourceProfileResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[13] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateSourceProfileResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateSourceProfileResponse) ProtoMessage() {} - -func (x *CreateSourceProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[13] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateSourceProfileResponse.ProtoReflect.Descriptor instead. 
-func (*CreateSourceProfileResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{13} -} - -func (x *CreateSourceProfileResponse) GetProfile() *SourceProfile { - if x != nil { - return x.Profile - } - return nil -} - -type UpdateSourceProfileRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` - ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Config *SourceConfig `protobuf:"bytes,4,opt,name=config,proto3" json:"config,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UpdateSourceProfileRequest) Reset() { - *x = UpdateSourceProfileRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UpdateSourceProfileRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateSourceProfileRequest) ProtoMessage() {} - -func (x *UpdateSourceProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateSourceProfileRequest.ProtoReflect.Descriptor instead. 
-func (*UpdateSourceProfileRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{14} -} - -func (x *UpdateSourceProfileRequest) GetSourceType() string { - if x != nil { - return x.SourceType - } - return "" -} - -func (x *UpdateSourceProfileRequest) GetProfileId() string { - if x != nil { - return x.ProfileId - } - return "" -} - -func (x *UpdateSourceProfileRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *UpdateSourceProfileRequest) GetConfig() *SourceConfig { - if x != nil { - return x.Config - } - return nil -} - -type UpdateSourceProfileResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Profile *SourceProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UpdateSourceProfileResponse) Reset() { - *x = UpdateSourceProfileResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UpdateSourceProfileResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateSourceProfileResponse) ProtoMessage() {} - -func (x *UpdateSourceProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateSourceProfileResponse.ProtoReflect.Descriptor instead. 
-func (*UpdateSourceProfileResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{15} -} - -func (x *UpdateSourceProfileResponse) GetProfile() *SourceProfile { - if x != nil { - return x.Profile - } - return nil -} - -type DeleteSourceProfileRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` - ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DeleteSourceProfileRequest) Reset() { - *x = DeleteSourceProfileRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[16] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DeleteSourceProfileRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteSourceProfileRequest) ProtoMessage() {} - -func (x *DeleteSourceProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[16] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteSourceProfileRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteSourceProfileRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{16} -} - -func (x *DeleteSourceProfileRequest) GetSourceType() string { - if x != nil { - return x.SourceType - } - return "" -} - -func (x *DeleteSourceProfileRequest) GetProfileId() string { - if x != nil { - return x.ProfileId - } - return "" -} - -type DeleteSourceProfileResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DeleteSourceProfileResponse) Reset() { - *x = DeleteSourceProfileResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[17] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DeleteSourceProfileResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteSourceProfileResponse) ProtoMessage() {} - -func (x *DeleteSourceProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[17] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteSourceProfileResponse.ProtoReflect.Descriptor instead. 
-func (*DeleteSourceProfileResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{17} -} - -type SetActiveSourceProfileRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SourceType string `protobuf:"bytes,1,opt,name=source_type,json=sourceType,proto3" json:"source_type,omitempty"` - ProfileId string `protobuf:"bytes,2,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SetActiveSourceProfileRequest) Reset() { - *x = SetActiveSourceProfileRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[18] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SetActiveSourceProfileRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetActiveSourceProfileRequest) ProtoMessage() {} - -func (x *SetActiveSourceProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[18] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetActiveSourceProfileRequest.ProtoReflect.Descriptor instead. 
-func (*SetActiveSourceProfileRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{18} -} - -func (x *SetActiveSourceProfileRequest) GetSourceType() string { - if x != nil { - return x.SourceType - } - return "" -} - -func (x *SetActiveSourceProfileRequest) GetProfileId() string { - if x != nil { - return x.ProfileId - } - return "" -} - -type SetActiveSourceProfileResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Profile *SourceProfile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SetActiveSourceProfileResponse) Reset() { - *x = SetActiveSourceProfileResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[19] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SetActiveSourceProfileResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetActiveSourceProfileResponse) ProtoMessage() {} - -func (x *SetActiveSourceProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[19] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetActiveSourceProfileResponse.ProtoReflect.Descriptor instead. 
-func (*SetActiveSourceProfileResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{19} -} - -func (x *SetActiveSourceProfileResponse) GetProfile() *SourceProfile { - if x != nil { - return x.Profile - } - return nil -} - -type ValidateS3Request struct { - state protoimpl.MessageState `protogen:"open.v1"` - S3 *S3Config `protobuf:"bytes,1,opt,name=s3,proto3" json:"s3,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ValidateS3Request) Reset() { - *x = ValidateS3Request{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[20] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ValidateS3Request) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidateS3Request) ProtoMessage() {} - -func (x *ValidateS3Request) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[20] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidateS3Request.ProtoReflect.Descriptor instead. 
-func (*ValidateS3Request) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{20} -} - -func (x *ValidateS3Request) GetS3() *S3Config { - if x != nil { - return x.S3 - } - return nil -} - -type ValidateS3Response struct { - state protoimpl.MessageState `protogen:"open.v1"` - Ok bool `protobuf:"varint,1,opt,name=ok,proto3" json:"ok,omitempty"` - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ValidateS3Response) Reset() { - *x = ValidateS3Response{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[21] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ValidateS3Response) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ValidateS3Response) ProtoMessage() {} - -func (x *ValidateS3Response) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[21] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ValidateS3Response.ProtoReflect.Descriptor instead. 
-func (*ValidateS3Response) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{21} -} - -func (x *ValidateS3Response) GetOk() bool { - if x != nil { - return x.Ok - } - return false -} - -func (x *ValidateS3Response) GetMessage() string { - if x != nil { - return x.Message - } - return "" -} - -type S3Profile struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - IsActive bool `protobuf:"varint,3,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"` - S3 *S3Config `protobuf:"bytes,4,opt,name=s3,proto3" json:"s3,omitempty"` - SecretAccessKeyConfigured bool `protobuf:"varint,5,opt,name=secret_access_key_configured,json=secretAccessKeyConfigured,proto3" json:"secret_access_key_configured,omitempty"` - CreatedAt string `protobuf:"bytes,6,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` - UpdatedAt string `protobuf:"bytes,7,opt,name=updated_at,json=updatedAt,proto3" json:"updated_at,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *S3Profile) Reset() { - *x = S3Profile{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[22] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *S3Profile) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*S3Profile) ProtoMessage() {} - -func (x *S3Profile) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[22] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use S3Profile.ProtoReflect.Descriptor instead. 
-func (*S3Profile) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{22} -} - -func (x *S3Profile) GetProfileId() string { - if x != nil { - return x.ProfileId - } - return "" -} - -func (x *S3Profile) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *S3Profile) GetIsActive() bool { - if x != nil { - return x.IsActive - } - return false -} - -func (x *S3Profile) GetS3() *S3Config { - if x != nil { - return x.S3 - } - return nil -} - -func (x *S3Profile) GetSecretAccessKeyConfigured() bool { - if x != nil { - return x.SecretAccessKeyConfigured - } - return false -} - -func (x *S3Profile) GetCreatedAt() string { - if x != nil { - return x.CreatedAt - } - return "" -} - -func (x *S3Profile) GetUpdatedAt() string { - if x != nil { - return x.UpdatedAt - } - return "" -} - -type ListS3ProfilesRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListS3ProfilesRequest) Reset() { - *x = ListS3ProfilesRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[23] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListS3ProfilesRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListS3ProfilesRequest) ProtoMessage() {} - -func (x *ListS3ProfilesRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[23] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListS3ProfilesRequest.ProtoReflect.Descriptor instead. 
-func (*ListS3ProfilesRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{23} -} - -type ListS3ProfilesResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Items []*S3Profile `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListS3ProfilesResponse) Reset() { - *x = ListS3ProfilesResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[24] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListS3ProfilesResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListS3ProfilesResponse) ProtoMessage() {} - -func (x *ListS3ProfilesResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[24] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListS3ProfilesResponse.ProtoReflect.Descriptor instead. 
-func (*ListS3ProfilesResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{24} -} - -func (x *ListS3ProfilesResponse) GetItems() []*S3Profile { - if x != nil { - return x.Items - } - return nil -} - -type CreateS3ProfileRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - S3 *S3Config `protobuf:"bytes,3,opt,name=s3,proto3" json:"s3,omitempty"` - SetActive bool `protobuf:"varint,4,opt,name=set_active,json=setActive,proto3" json:"set_active,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateS3ProfileRequest) Reset() { - *x = CreateS3ProfileRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[25] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateS3ProfileRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateS3ProfileRequest) ProtoMessage() {} - -func (x *CreateS3ProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[25] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateS3ProfileRequest.ProtoReflect.Descriptor instead. 
-func (*CreateS3ProfileRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{25} -} - -func (x *CreateS3ProfileRequest) GetProfileId() string { - if x != nil { - return x.ProfileId - } - return "" -} - -func (x *CreateS3ProfileRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *CreateS3ProfileRequest) GetS3() *S3Config { - if x != nil { - return x.S3 - } - return nil -} - -func (x *CreateS3ProfileRequest) GetSetActive() bool { - if x != nil { - return x.SetActive - } - return false -} - -type CreateS3ProfileResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Profile *S3Profile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateS3ProfileResponse) Reset() { - *x = CreateS3ProfileResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[26] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateS3ProfileResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateS3ProfileResponse) ProtoMessage() {} - -func (x *CreateS3ProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[26] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateS3ProfileResponse.ProtoReflect.Descriptor instead. 
-func (*CreateS3ProfileResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{26} -} - -func (x *CreateS3ProfileResponse) GetProfile() *S3Profile { - if x != nil { - return x.Profile - } - return nil -} - -type UpdateS3ProfileRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - S3 *S3Config `protobuf:"bytes,3,opt,name=s3,proto3" json:"s3,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UpdateS3ProfileRequest) Reset() { - *x = UpdateS3ProfileRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[27] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UpdateS3ProfileRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateS3ProfileRequest) ProtoMessage() {} - -func (x *UpdateS3ProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[27] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateS3ProfileRequest.ProtoReflect.Descriptor instead. 
-func (*UpdateS3ProfileRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{27} -} - -func (x *UpdateS3ProfileRequest) GetProfileId() string { - if x != nil { - return x.ProfileId - } - return "" -} - -func (x *UpdateS3ProfileRequest) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *UpdateS3ProfileRequest) GetS3() *S3Config { - if x != nil { - return x.S3 - } - return nil -} - -type UpdateS3ProfileResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Profile *S3Profile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *UpdateS3ProfileResponse) Reset() { - *x = UpdateS3ProfileResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[28] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *UpdateS3ProfileResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*UpdateS3ProfileResponse) ProtoMessage() {} - -func (x *UpdateS3ProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[28] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use UpdateS3ProfileResponse.ProtoReflect.Descriptor instead. 
-func (*UpdateS3ProfileResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{28} -} - -func (x *UpdateS3ProfileResponse) GetProfile() *S3Profile { - if x != nil { - return x.Profile - } - return nil -} - -type DeleteS3ProfileRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DeleteS3ProfileRequest) Reset() { - *x = DeleteS3ProfileRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[29] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DeleteS3ProfileRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteS3ProfileRequest) ProtoMessage() {} - -func (x *DeleteS3ProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[29] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteS3ProfileRequest.ProtoReflect.Descriptor instead. 
-func (*DeleteS3ProfileRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{29} -} - -func (x *DeleteS3ProfileRequest) GetProfileId() string { - if x != nil { - return x.ProfileId - } - return "" -} - -type DeleteS3ProfileResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *DeleteS3ProfileResponse) Reset() { - *x = DeleteS3ProfileResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[30] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DeleteS3ProfileResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeleteS3ProfileResponse) ProtoMessage() {} - -func (x *DeleteS3ProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[30] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeleteS3ProfileResponse.ProtoReflect.Descriptor instead. 
-func (*DeleteS3ProfileResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{30} -} - -type SetActiveS3ProfileRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - ProfileId string `protobuf:"bytes,1,opt,name=profile_id,json=profileId,proto3" json:"profile_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SetActiveS3ProfileRequest) Reset() { - *x = SetActiveS3ProfileRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[31] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SetActiveS3ProfileRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetActiveS3ProfileRequest) ProtoMessage() {} - -func (x *SetActiveS3ProfileRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[31] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetActiveS3ProfileRequest.ProtoReflect.Descriptor instead. 
-func (*SetActiveS3ProfileRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{31} -} - -func (x *SetActiveS3ProfileRequest) GetProfileId() string { - if x != nil { - return x.ProfileId - } - return "" -} - -type SetActiveS3ProfileResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Profile *S3Profile `protobuf:"bytes,1,opt,name=profile,proto3" json:"profile,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *SetActiveS3ProfileResponse) Reset() { - *x = SetActiveS3ProfileResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[32] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *SetActiveS3ProfileResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*SetActiveS3ProfileResponse) ProtoMessage() {} - -func (x *SetActiveS3ProfileResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[32] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use SetActiveS3ProfileResponse.ProtoReflect.Descriptor instead. 
-func (*SetActiveS3ProfileResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{32} -} - -func (x *SetActiveS3ProfileResponse) GetProfile() *S3Profile { - if x != nil { - return x.Profile - } - return nil -} - -type CreateBackupJobRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - BackupType string `protobuf:"bytes,1,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` - UploadToS3 bool `protobuf:"varint,2,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` - TriggeredBy string `protobuf:"bytes,3,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` - IdempotencyKey string `protobuf:"bytes,4,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` - S3ProfileId string `protobuf:"bytes,5,opt,name=s3_profile_id,json=s3ProfileId,proto3" json:"s3_profile_id,omitempty"` - PostgresProfileId string `protobuf:"bytes,6,opt,name=postgres_profile_id,json=postgresProfileId,proto3" json:"postgres_profile_id,omitempty"` - RedisProfileId string `protobuf:"bytes,7,opt,name=redis_profile_id,json=redisProfileId,proto3" json:"redis_profile_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateBackupJobRequest) Reset() { - *x = CreateBackupJobRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[33] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateBackupJobRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateBackupJobRequest) ProtoMessage() {} - -func (x *CreateBackupJobRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[33] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: 
Use CreateBackupJobRequest.ProtoReflect.Descriptor instead. -func (*CreateBackupJobRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{33} -} - -func (x *CreateBackupJobRequest) GetBackupType() string { - if x != nil { - return x.BackupType - } - return "" -} - -func (x *CreateBackupJobRequest) GetUploadToS3() bool { - if x != nil { - return x.UploadToS3 - } - return false -} - -func (x *CreateBackupJobRequest) GetTriggeredBy() string { - if x != nil { - return x.TriggeredBy - } - return "" -} - -func (x *CreateBackupJobRequest) GetIdempotencyKey() string { - if x != nil { - return x.IdempotencyKey - } - return "" -} - -func (x *CreateBackupJobRequest) GetS3ProfileId() string { - if x != nil { - return x.S3ProfileId - } - return "" -} - -func (x *CreateBackupJobRequest) GetPostgresProfileId() string { - if x != nil { - return x.PostgresProfileId - } - return "" -} - -func (x *CreateBackupJobRequest) GetRedisProfileId() string { - if x != nil { - return x.RedisProfileId - } - return "" -} - -type BackupArtifact struct { - state protoimpl.MessageState `protogen:"open.v1"` - LocalPath string `protobuf:"bytes,1,opt,name=local_path,json=localPath,proto3" json:"local_path,omitempty"` - SizeBytes int64 `protobuf:"varint,2,opt,name=size_bytes,json=sizeBytes,proto3" json:"size_bytes,omitempty"` - Sha256 string `protobuf:"bytes,3,opt,name=sha256,proto3" json:"sha256,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BackupArtifact) Reset() { - *x = BackupArtifact{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[34] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BackupArtifact) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BackupArtifact) ProtoMessage() {} - -func (x *BackupArtifact) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[34] - if x != nil { - ms 
:= protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BackupArtifact.ProtoReflect.Descriptor instead. -func (*BackupArtifact) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{34} -} - -func (x *BackupArtifact) GetLocalPath() string { - if x != nil { - return x.LocalPath - } - return "" -} - -func (x *BackupArtifact) GetSizeBytes() int64 { - if x != nil { - return x.SizeBytes - } - return 0 -} - -func (x *BackupArtifact) GetSha256() string { - if x != nil { - return x.Sha256 - } - return "" -} - -type BackupS3Object struct { - state protoimpl.MessageState `protogen:"open.v1"` - Bucket string `protobuf:"bytes,1,opt,name=bucket,proto3" json:"bucket,omitempty"` - Key string `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` - Etag string `protobuf:"bytes,3,opt,name=etag,proto3" json:"etag,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BackupS3Object) Reset() { - *x = BackupS3Object{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[35] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BackupS3Object) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BackupS3Object) ProtoMessage() {} - -func (x *BackupS3Object) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[35] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BackupS3Object.ProtoReflect.Descriptor instead. 
-func (*BackupS3Object) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{35} -} - -func (x *BackupS3Object) GetBucket() string { - if x != nil { - return x.Bucket - } - return "" -} - -func (x *BackupS3Object) GetKey() string { - if x != nil { - return x.Key - } - return "" -} - -func (x *BackupS3Object) GetEtag() string { - if x != nil { - return x.Etag - } - return "" -} - -type BackupJob struct { - state protoimpl.MessageState `protogen:"open.v1"` - JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` - BackupType string `protobuf:"bytes,2,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - TriggeredBy string `protobuf:"bytes,4,opt,name=triggered_by,json=triggeredBy,proto3" json:"triggered_by,omitempty"` - IdempotencyKey string `protobuf:"bytes,5,opt,name=idempotency_key,json=idempotencyKey,proto3" json:"idempotency_key,omitempty"` - UploadToS3 bool `protobuf:"varint,6,opt,name=upload_to_s3,json=uploadToS3,proto3" json:"upload_to_s3,omitempty"` - StartedAt string `protobuf:"bytes,7,opt,name=started_at,json=startedAt,proto3" json:"started_at,omitempty"` - FinishedAt string `protobuf:"bytes,8,opt,name=finished_at,json=finishedAt,proto3" json:"finished_at,omitempty"` - ErrorMessage string `protobuf:"bytes,9,opt,name=error_message,json=errorMessage,proto3" json:"error_message,omitempty"` - Artifact *BackupArtifact `protobuf:"bytes,10,opt,name=artifact,proto3" json:"artifact,omitempty"` - S3Object *BackupS3Object `protobuf:"bytes,11,opt,name=s3_object,json=s3Object,proto3" json:"s3_object,omitempty"` - S3ProfileId string `protobuf:"bytes,12,opt,name=s3_profile_id,json=s3ProfileId,proto3" json:"s3_profile_id,omitempty"` - PostgresProfileId string `protobuf:"bytes,13,opt,name=postgres_profile_id,json=postgresProfileId,proto3" json:"postgres_profile_id,omitempty"` - 
RedisProfileId string `protobuf:"bytes,14,opt,name=redis_profile_id,json=redisProfileId,proto3" json:"redis_profile_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BackupJob) Reset() { - *x = BackupJob{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[36] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BackupJob) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BackupJob) ProtoMessage() {} - -func (x *BackupJob) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[36] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BackupJob.ProtoReflect.Descriptor instead. -func (*BackupJob) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{36} -} - -func (x *BackupJob) GetJobId() string { - if x != nil { - return x.JobId - } - return "" -} - -func (x *BackupJob) GetBackupType() string { - if x != nil { - return x.BackupType - } - return "" -} - -func (x *BackupJob) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *BackupJob) GetTriggeredBy() string { - if x != nil { - return x.TriggeredBy - } - return "" -} - -func (x *BackupJob) GetIdempotencyKey() string { - if x != nil { - return x.IdempotencyKey - } - return "" -} - -func (x *BackupJob) GetUploadToS3() bool { - if x != nil { - return x.UploadToS3 - } - return false -} - -func (x *BackupJob) GetStartedAt() string { - if x != nil { - return x.StartedAt - } - return "" -} - -func (x *BackupJob) GetFinishedAt() string { - if x != nil { - return x.FinishedAt - } - return "" -} - -func (x *BackupJob) GetErrorMessage() string { - if x != nil { - return x.ErrorMessage - } - return "" -} - -func (x *BackupJob) GetArtifact() 
*BackupArtifact { - if x != nil { - return x.Artifact - } - return nil -} - -func (x *BackupJob) GetS3Object() *BackupS3Object { - if x != nil { - return x.S3Object - } - return nil -} - -func (x *BackupJob) GetS3ProfileId() string { - if x != nil { - return x.S3ProfileId - } - return "" -} - -func (x *BackupJob) GetPostgresProfileId() string { - if x != nil { - return x.PostgresProfileId - } - return "" -} - -func (x *BackupJob) GetRedisProfileId() string { - if x != nil { - return x.RedisProfileId - } - return "" -} - -type CreateBackupJobResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Job *BackupJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateBackupJobResponse) Reset() { - *x = CreateBackupJobResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[37] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateBackupJobResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateBackupJobResponse) ProtoMessage() {} - -func (x *CreateBackupJobResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[37] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateBackupJobResponse.ProtoReflect.Descriptor instead. 
-func (*CreateBackupJobResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{37} -} - -func (x *CreateBackupJobResponse) GetJob() *BackupJob { - if x != nil { - return x.Job - } - return nil -} - -type ListBackupJobsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - PageSize int32 `protobuf:"varint,1,opt,name=page_size,json=pageSize,proto3" json:"page_size,omitempty"` - PageToken string `protobuf:"bytes,2,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"` - BackupType string `protobuf:"bytes,4,opt,name=backup_type,json=backupType,proto3" json:"backup_type,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListBackupJobsRequest) Reset() { - *x = ListBackupJobsRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[38] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListBackupJobsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListBackupJobsRequest) ProtoMessage() {} - -func (x *ListBackupJobsRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[38] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListBackupJobsRequest.ProtoReflect.Descriptor instead. 
-func (*ListBackupJobsRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{38} -} - -func (x *ListBackupJobsRequest) GetPageSize() int32 { - if x != nil { - return x.PageSize - } - return 0 -} - -func (x *ListBackupJobsRequest) GetPageToken() string { - if x != nil { - return x.PageToken - } - return "" -} - -func (x *ListBackupJobsRequest) GetStatus() string { - if x != nil { - return x.Status - } - return "" -} - -func (x *ListBackupJobsRequest) GetBackupType() string { - if x != nil { - return x.BackupType - } - return "" -} - -type ListBackupJobsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Items []*BackupJob `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"` - NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *ListBackupJobsResponse) Reset() { - *x = ListBackupJobsResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[39] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ListBackupJobsResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ListBackupJobsResponse) ProtoMessage() {} - -func (x *ListBackupJobsResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[39] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use ListBackupJobsResponse.ProtoReflect.Descriptor instead. 
-func (*ListBackupJobsResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{39} -} - -func (x *ListBackupJobsResponse) GetItems() []*BackupJob { - if x != nil { - return x.Items - } - return nil -} - -func (x *ListBackupJobsResponse) GetNextPageToken() string { - if x != nil { - return x.NextPageToken - } - return "" -} - -type GetBackupJobRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - JobId string `protobuf:"bytes,1,opt,name=job_id,json=jobId,proto3" json:"job_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetBackupJobRequest) Reset() { - *x = GetBackupJobRequest{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[40] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetBackupJobRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetBackupJobRequest) ProtoMessage() {} - -func (x *GetBackupJobRequest) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[40] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetBackupJobRequest.ProtoReflect.Descriptor instead. 
-func (*GetBackupJobRequest) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{40} -} - -func (x *GetBackupJobRequest) GetJobId() string { - if x != nil { - return x.JobId - } - return "" -} - -type GetBackupJobResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - Job *BackupJob `protobuf:"bytes,1,opt,name=job,proto3" json:"job,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *GetBackupJobResponse) Reset() { - *x = GetBackupJobResponse{} - mi := &file_proto_backup_v1_backup_proto_msgTypes[41] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *GetBackupJobResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*GetBackupJobResponse) ProtoMessage() {} - -func (x *GetBackupJobResponse) ProtoReflect() protoreflect.Message { - mi := &file_proto_backup_v1_backup_proto_msgTypes[41] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use GetBackupJobResponse.ProtoReflect.Descriptor instead. 
-func (*GetBackupJobResponse) Descriptor() ([]byte, []int) { - return file_proto_backup_v1_backup_proto_rawDescGZIP(), []int{41} -} - -func (x *GetBackupJobResponse) GetJob() *BackupJob { - if x != nil { - return x.Job - } - return nil -} - -var File_proto_backup_v1_backup_proto protoreflect.FileDescriptor - -const file_proto_backup_v1_backup_proto_rawDesc = "" + - "\n" + - "\x1cproto/backup/v1/backup.proto\x12\tbackup.v1\"\x0f\n" + - "\rHealthRequest\"i\n" + - "\x0eHealthResponse\x12\x16\n" + - "\x06status\x18\x01 \x01(\tR\x06status\x12\x18\n" + - "\aversion\x18\x02 \x01(\tR\aversion\x12%\n" + - "\x0euptime_seconds\x18\x03 \x01(\x03R\ruptimeSeconds\"\x84\x02\n" + - "\fSourceConfig\x12\x12\n" + - "\x04host\x18\x01 \x01(\tR\x04host\x12\x12\n" + - "\x04port\x18\x02 \x01(\x05R\x04port\x12\x12\n" + - "\x04user\x18\x03 \x01(\tR\x04user\x12\x1a\n" + - "\bpassword\x18\x04 \x01(\tR\bpassword\x12\x1a\n" + - "\bdatabase\x18\x05 \x01(\tR\bdatabase\x12\x19\n" + - "\bssl_mode\x18\x06 \x01(\tR\asslMode\x12\x12\n" + - "\x04addr\x18\a \x01(\tR\x04addr\x12\x1a\n" + - "\busername\x18\b \x01(\tR\busername\x12\x0e\n" + - "\x02db\x18\t \x01(\x05R\x02db\x12%\n" + - "\x0econtainer_name\x18\n" + - " \x01(\tR\rcontainerName\"\x9b\x02\n" + - "\bS3Config\x12\x18\n" + - "\aenabled\x18\x01 \x01(\bR\aenabled\x12\x1a\n" + - "\bendpoint\x18\x02 \x01(\tR\bendpoint\x12\x16\n" + - "\x06region\x18\x03 \x01(\tR\x06region\x12\x16\n" + - "\x06bucket\x18\x04 \x01(\tR\x06bucket\x12\"\n" + - "\raccess_key_id\x18\x05 \x01(\tR\vaccessKeyId\x12*\n" + - "\x11secret_access_key\x18\x06 \x01(\tR\x0fsecretAccessKey\x12\x16\n" + - "\x06prefix\x18\a \x01(\tR\x06prefix\x12(\n" + - "\x10force_path_style\x18\b \x01(\bR\x0eforcePathStyle\x12\x17\n" + - "\ause_ssl\x18\t \x01(\bR\x06useSsl\"\xe3\x03\n" + - "\fBackupConfig\x12\x1f\n" + - "\vsource_mode\x18\x01 \x01(\tR\n" + - "sourceMode\x12\x1f\n" + - "\vbackup_root\x18\x02 \x01(\tR\n" + - "backupRoot\x12\x1f\n" + - "\vsqlite_path\x18\x03 \x01(\tR\n" + - 
"sqlitePath\x12%\n" + - "\x0eretention_days\x18\x04 \x01(\x05R\rretentionDays\x12\x1b\n" + - "\tkeep_last\x18\x05 \x01(\x05R\bkeepLast\x123\n" + - "\bpostgres\x18\x06 \x01(\v2\x17.backup.v1.SourceConfigR\bpostgres\x12-\n" + - "\x05redis\x18\a \x01(\v2\x17.backup.v1.SourceConfigR\x05redis\x12#\n" + - "\x02s3\x18\b \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\x12/\n" + - "\x14active_s3_profile_id\x18\t \x01(\tR\x11activeS3ProfileId\x12;\n" + - "\x1aactive_postgres_profile_id\x18\n" + - " \x01(\tR\x17activePostgresProfileId\x125\n" + - "\x17active_redis_profile_id\x18\v \x01(\tR\x14activeRedisProfileId\"\x12\n" + - "\x10GetConfigRequest\"D\n" + - "\x11GetConfigResponse\x12/\n" + - "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"F\n" + - "\x13UpdateConfigRequest\x12/\n" + - "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"G\n" + - "\x14UpdateConfigResponse\x12/\n" + - "\x06config\x18\x01 \x01(\v2\x17.backup.v1.BackupConfigR\x06config\"\xa0\x02\n" + - "\rSourceProfile\x12\x1f\n" + - "\vsource_type\x18\x01 \x01(\tR\n" + - "sourceType\x12\x1d\n" + - "\n" + - "profile_id\x18\x02 \x01(\tR\tprofileId\x12\x12\n" + - "\x04name\x18\x03 \x01(\tR\x04name\x12\x1b\n" + - "\tis_active\x18\x04 \x01(\bR\bisActive\x12/\n" + - "\x06config\x18\x05 \x01(\v2\x17.backup.v1.SourceConfigR\x06config\x12/\n" + - "\x13password_configured\x18\x06 \x01(\bR\x12passwordConfigured\x12\x1d\n" + - "\n" + - "created_at\x18\a \x01(\tR\tcreatedAt\x12\x1d\n" + - "\n" + - "updated_at\x18\b \x01(\tR\tupdatedAt\"<\n" + - "\x19ListSourceProfilesRequest\x12\x1f\n" + - "\vsource_type\x18\x01 \x01(\tR\n" + - "sourceType\"L\n" + - "\x1aListSourceProfilesResponse\x12.\n" + - "\x05items\x18\x01 \x03(\v2\x18.backup.v1.SourceProfileR\x05items\"\xc0\x01\n" + - "\x1aCreateSourceProfileRequest\x12\x1f\n" + - "\vsource_type\x18\x01 \x01(\tR\n" + - "sourceType\x12\x1d\n" + - "\n" + - "profile_id\x18\x02 \x01(\tR\tprofileId\x12\x12\n" + - "\x04name\x18\x03 \x01(\tR\x04name\x12/\n" + - 
"\x06config\x18\x04 \x01(\v2\x17.backup.v1.SourceConfigR\x06config\x12\x1d\n" + - "\n" + - "set_active\x18\x05 \x01(\bR\tsetActive\"Q\n" + - "\x1bCreateSourceProfileResponse\x122\n" + - "\aprofile\x18\x01 \x01(\v2\x18.backup.v1.SourceProfileR\aprofile\"\xa1\x01\n" + - "\x1aUpdateSourceProfileRequest\x12\x1f\n" + - "\vsource_type\x18\x01 \x01(\tR\n" + - "sourceType\x12\x1d\n" + - "\n" + - "profile_id\x18\x02 \x01(\tR\tprofileId\x12\x12\n" + - "\x04name\x18\x03 \x01(\tR\x04name\x12/\n" + - "\x06config\x18\x04 \x01(\v2\x17.backup.v1.SourceConfigR\x06config\"Q\n" + - "\x1bUpdateSourceProfileResponse\x122\n" + - "\aprofile\x18\x01 \x01(\v2\x18.backup.v1.SourceProfileR\aprofile\"\\\n" + - "\x1aDeleteSourceProfileRequest\x12\x1f\n" + - "\vsource_type\x18\x01 \x01(\tR\n" + - "sourceType\x12\x1d\n" + - "\n" + - "profile_id\x18\x02 \x01(\tR\tprofileId\"\x1d\n" + - "\x1bDeleteSourceProfileResponse\"_\n" + - "\x1dSetActiveSourceProfileRequest\x12\x1f\n" + - "\vsource_type\x18\x01 \x01(\tR\n" + - "sourceType\x12\x1d\n" + - "\n" + - "profile_id\x18\x02 \x01(\tR\tprofileId\"T\n" + - "\x1eSetActiveSourceProfileResponse\x122\n" + - "\aprofile\x18\x01 \x01(\v2\x18.backup.v1.SourceProfileR\aprofile\"8\n" + - "\x11ValidateS3Request\x12#\n" + - "\x02s3\x18\x01 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\">\n" + - "\x12ValidateS3Response\x12\x0e\n" + - "\x02ok\x18\x01 \x01(\bR\x02ok\x12\x18\n" + - "\amessage\x18\x02 \x01(\tR\amessage\"\xff\x01\n" + - "\tS3Profile\x12\x1d\n" + - "\n" + - "profile_id\x18\x01 \x01(\tR\tprofileId\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\x12\x1b\n" + - "\tis_active\x18\x03 \x01(\bR\bisActive\x12#\n" + - "\x02s3\x18\x04 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\x12?\n" + - "\x1csecret_access_key_configured\x18\x05 \x01(\bR\x19secretAccessKeyConfigured\x12\x1d\n" + - "\n" + - "created_at\x18\x06 \x01(\tR\tcreatedAt\x12\x1d\n" + - "\n" + - "updated_at\x18\a \x01(\tR\tupdatedAt\"\x17\n" + - "\x15ListS3ProfilesRequest\"D\n" + - 
"\x16ListS3ProfilesResponse\x12*\n" + - "\x05items\x18\x01 \x03(\v2\x14.backup.v1.S3ProfileR\x05items\"\x8f\x01\n" + - "\x16CreateS3ProfileRequest\x12\x1d\n" + - "\n" + - "profile_id\x18\x01 \x01(\tR\tprofileId\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\x12#\n" + - "\x02s3\x18\x03 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\x12\x1d\n" + - "\n" + - "set_active\x18\x04 \x01(\bR\tsetActive\"I\n" + - "\x17CreateS3ProfileResponse\x12.\n" + - "\aprofile\x18\x01 \x01(\v2\x14.backup.v1.S3ProfileR\aprofile\"p\n" + - "\x16UpdateS3ProfileRequest\x12\x1d\n" + - "\n" + - "profile_id\x18\x01 \x01(\tR\tprofileId\x12\x12\n" + - "\x04name\x18\x02 \x01(\tR\x04name\x12#\n" + - "\x02s3\x18\x03 \x01(\v2\x13.backup.v1.S3ConfigR\x02s3\"I\n" + - "\x17UpdateS3ProfileResponse\x12.\n" + - "\aprofile\x18\x01 \x01(\v2\x14.backup.v1.S3ProfileR\aprofile\"7\n" + - "\x16DeleteS3ProfileRequest\x12\x1d\n" + - "\n" + - "profile_id\x18\x01 \x01(\tR\tprofileId\"\x19\n" + - "\x17DeleteS3ProfileResponse\":\n" + - "\x19SetActiveS3ProfileRequest\x12\x1d\n" + - "\n" + - "profile_id\x18\x01 \x01(\tR\tprofileId\"L\n" + - "\x1aSetActiveS3ProfileResponse\x12.\n" + - "\aprofile\x18\x01 \x01(\v2\x14.backup.v1.S3ProfileR\aprofile\"\xa5\x02\n" + - "\x16CreateBackupJobRequest\x12\x1f\n" + - "\vbackup_type\x18\x01 \x01(\tR\n" + - "backupType\x12 \n" + - "\fupload_to_s3\x18\x02 \x01(\bR\n" + - "uploadToS3\x12!\n" + - "\ftriggered_by\x18\x03 \x01(\tR\vtriggeredBy\x12'\n" + - "\x0fidempotency_key\x18\x04 \x01(\tR\x0eidempotencyKey\x12\"\n" + - "\rs3_profile_id\x18\x05 \x01(\tR\vs3ProfileId\x12.\n" + - "\x13postgres_profile_id\x18\x06 \x01(\tR\x11postgresProfileId\x12(\n" + - "\x10redis_profile_id\x18\a \x01(\tR\x0eredisProfileId\"f\n" + - "\x0eBackupArtifact\x12\x1d\n" + - "\n" + - "local_path\x18\x01 \x01(\tR\tlocalPath\x12\x1d\n" + - "\n" + - "size_bytes\x18\x02 \x01(\x03R\tsizeBytes\x12\x16\n" + - "\x06sha256\x18\x03 \x01(\tR\x06sha256\"N\n" + - "\x0eBackupS3Object\x12\x16\n" + - "\x06bucket\x18\x01 
\x01(\tR\x06bucket\x12\x10\n" + - "\x03key\x18\x02 \x01(\tR\x03key\x12\x12\n" + - "\x04etag\x18\x03 \x01(\tR\x04etag\"\x9b\x04\n" + - "\tBackupJob\x12\x15\n" + - "\x06job_id\x18\x01 \x01(\tR\x05jobId\x12\x1f\n" + - "\vbackup_type\x18\x02 \x01(\tR\n" + - "backupType\x12\x16\n" + - "\x06status\x18\x03 \x01(\tR\x06status\x12!\n" + - "\ftriggered_by\x18\x04 \x01(\tR\vtriggeredBy\x12'\n" + - "\x0fidempotency_key\x18\x05 \x01(\tR\x0eidempotencyKey\x12 \n" + - "\fupload_to_s3\x18\x06 \x01(\bR\n" + - "uploadToS3\x12\x1d\n" + - "\n" + - "started_at\x18\a \x01(\tR\tstartedAt\x12\x1f\n" + - "\vfinished_at\x18\b \x01(\tR\n" + - "finishedAt\x12#\n" + - "\rerror_message\x18\t \x01(\tR\ferrorMessage\x125\n" + - "\bartifact\x18\n" + - " \x01(\v2\x19.backup.v1.BackupArtifactR\bartifact\x126\n" + - "\ts3_object\x18\v \x01(\v2\x19.backup.v1.BackupS3ObjectR\bs3Object\x12\"\n" + - "\rs3_profile_id\x18\f \x01(\tR\vs3ProfileId\x12.\n" + - "\x13postgres_profile_id\x18\r \x01(\tR\x11postgresProfileId\x12(\n" + - "\x10redis_profile_id\x18\x0e \x01(\tR\x0eredisProfileId\"A\n" + - "\x17CreateBackupJobResponse\x12&\n" + - "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job\"\x8c\x01\n" + - "\x15ListBackupJobsRequest\x12\x1b\n" + - "\tpage_size\x18\x01 \x01(\x05R\bpageSize\x12\x1d\n" + - "\n" + - "page_token\x18\x02 \x01(\tR\tpageToken\x12\x16\n" + - "\x06status\x18\x03 \x01(\tR\x06status\x12\x1f\n" + - "\vbackup_type\x18\x04 \x01(\tR\n" + - "backupType\"l\n" + - "\x16ListBackupJobsResponse\x12*\n" + - "\x05items\x18\x01 \x03(\v2\x14.backup.v1.BackupJobR\x05items\x12&\n" + - "\x0fnext_page_token\x18\x02 \x01(\tR\rnextPageToken\",\n" + - "\x13GetBackupJobRequest\x12\x15\n" + - "\x06job_id\x18\x01 \x01(\tR\x05jobId\">\n" + - "\x14GetBackupJobResponse\x12&\n" + - "\x03job\x18\x01 \x01(\v2\x14.backup.v1.BackupJobR\x03job2\x80\f\n" + - "\rBackupService\x12=\n" + - "\x06Health\x12\x18.backup.v1.HealthRequest\x1a\x19.backup.v1.HealthResponse\x12F\n" + - 
"\tGetConfig\x12\x1b.backup.v1.GetConfigRequest\x1a\x1c.backup.v1.GetConfigResponse\x12O\n" + - "\fUpdateConfig\x12\x1e.backup.v1.UpdateConfigRequest\x1a\x1f.backup.v1.UpdateConfigResponse\x12a\n" + - "\x12ListSourceProfiles\x12$.backup.v1.ListSourceProfilesRequest\x1a%.backup.v1.ListSourceProfilesResponse\x12d\n" + - "\x13CreateSourceProfile\x12%.backup.v1.CreateSourceProfileRequest\x1a&.backup.v1.CreateSourceProfileResponse\x12d\n" + - "\x13UpdateSourceProfile\x12%.backup.v1.UpdateSourceProfileRequest\x1a&.backup.v1.UpdateSourceProfileResponse\x12d\n" + - "\x13DeleteSourceProfile\x12%.backup.v1.DeleteSourceProfileRequest\x1a&.backup.v1.DeleteSourceProfileResponse\x12m\n" + - "\x16SetActiveSourceProfile\x12(.backup.v1.SetActiveSourceProfileRequest\x1a).backup.v1.SetActiveSourceProfileResponse\x12I\n" + - "\n" + - "ValidateS3\x12\x1c.backup.v1.ValidateS3Request\x1a\x1d.backup.v1.ValidateS3Response\x12U\n" + - "\x0eListS3Profiles\x12 .backup.v1.ListS3ProfilesRequest\x1a!.backup.v1.ListS3ProfilesResponse\x12X\n" + - "\x0fCreateS3Profile\x12!.backup.v1.CreateS3ProfileRequest\x1a\".backup.v1.CreateS3ProfileResponse\x12X\n" + - "\x0fUpdateS3Profile\x12!.backup.v1.UpdateS3ProfileRequest\x1a\".backup.v1.UpdateS3ProfileResponse\x12X\n" + - "\x0fDeleteS3Profile\x12!.backup.v1.DeleteS3ProfileRequest\x1a\".backup.v1.DeleteS3ProfileResponse\x12a\n" + - "\x12SetActiveS3Profile\x12$.backup.v1.SetActiveS3ProfileRequest\x1a%.backup.v1.SetActiveS3ProfileResponse\x12X\n" + - "\x0fCreateBackupJob\x12!.backup.v1.CreateBackupJobRequest\x1a\".backup.v1.CreateBackupJobResponse\x12U\n" + - "\x0eListBackupJobs\x12 .backup.v1.ListBackupJobsRequest\x1a!.backup.v1.ListBackupJobsResponse\x12O\n" + - "\fGetBackupJob\x12\x1e.backup.v1.GetBackupJobRequest\x1a\x1f.backup.v1.GetBackupJobResponseB=Z;github.com/Wei-Shaw/sub2api/backup/proto/backup/v1;backupv1b\x06proto3" - -var ( - file_proto_backup_v1_backup_proto_rawDescOnce sync.Once - file_proto_backup_v1_backup_proto_rawDescData []byte -) - 
-func file_proto_backup_v1_backup_proto_rawDescGZIP() []byte { - file_proto_backup_v1_backup_proto_rawDescOnce.Do(func() { - file_proto_backup_v1_backup_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_proto_backup_v1_backup_proto_rawDesc), len(file_proto_backup_v1_backup_proto_rawDesc))) - }) - return file_proto_backup_v1_backup_proto_rawDescData -} - -var file_proto_backup_v1_backup_proto_msgTypes = make([]protoimpl.MessageInfo, 42) -var file_proto_backup_v1_backup_proto_goTypes = []any{ - (*HealthRequest)(nil), // 0: backup.v1.HealthRequest - (*HealthResponse)(nil), // 1: backup.v1.HealthResponse - (*SourceConfig)(nil), // 2: backup.v1.SourceConfig - (*S3Config)(nil), // 3: backup.v1.S3Config - (*BackupConfig)(nil), // 4: backup.v1.BackupConfig - (*GetConfigRequest)(nil), // 5: backup.v1.GetConfigRequest - (*GetConfigResponse)(nil), // 6: backup.v1.GetConfigResponse - (*UpdateConfigRequest)(nil), // 7: backup.v1.UpdateConfigRequest - (*UpdateConfigResponse)(nil), // 8: backup.v1.UpdateConfigResponse - (*SourceProfile)(nil), // 9: backup.v1.SourceProfile - (*ListSourceProfilesRequest)(nil), // 10: backup.v1.ListSourceProfilesRequest - (*ListSourceProfilesResponse)(nil), // 11: backup.v1.ListSourceProfilesResponse - (*CreateSourceProfileRequest)(nil), // 12: backup.v1.CreateSourceProfileRequest - (*CreateSourceProfileResponse)(nil), // 13: backup.v1.CreateSourceProfileResponse - (*UpdateSourceProfileRequest)(nil), // 14: backup.v1.UpdateSourceProfileRequest - (*UpdateSourceProfileResponse)(nil), // 15: backup.v1.UpdateSourceProfileResponse - (*DeleteSourceProfileRequest)(nil), // 16: backup.v1.DeleteSourceProfileRequest - (*DeleteSourceProfileResponse)(nil), // 17: backup.v1.DeleteSourceProfileResponse - (*SetActiveSourceProfileRequest)(nil), // 18: backup.v1.SetActiveSourceProfileRequest - (*SetActiveSourceProfileResponse)(nil), // 19: backup.v1.SetActiveSourceProfileResponse - (*ValidateS3Request)(nil), // 20: 
backup.v1.ValidateS3Request - (*ValidateS3Response)(nil), // 21: backup.v1.ValidateS3Response - (*S3Profile)(nil), // 22: backup.v1.S3Profile - (*ListS3ProfilesRequest)(nil), // 23: backup.v1.ListS3ProfilesRequest - (*ListS3ProfilesResponse)(nil), // 24: backup.v1.ListS3ProfilesResponse - (*CreateS3ProfileRequest)(nil), // 25: backup.v1.CreateS3ProfileRequest - (*CreateS3ProfileResponse)(nil), // 26: backup.v1.CreateS3ProfileResponse - (*UpdateS3ProfileRequest)(nil), // 27: backup.v1.UpdateS3ProfileRequest - (*UpdateS3ProfileResponse)(nil), // 28: backup.v1.UpdateS3ProfileResponse - (*DeleteS3ProfileRequest)(nil), // 29: backup.v1.DeleteS3ProfileRequest - (*DeleteS3ProfileResponse)(nil), // 30: backup.v1.DeleteS3ProfileResponse - (*SetActiveS3ProfileRequest)(nil), // 31: backup.v1.SetActiveS3ProfileRequest - (*SetActiveS3ProfileResponse)(nil), // 32: backup.v1.SetActiveS3ProfileResponse - (*CreateBackupJobRequest)(nil), // 33: backup.v1.CreateBackupJobRequest - (*BackupArtifact)(nil), // 34: backup.v1.BackupArtifact - (*BackupS3Object)(nil), // 35: backup.v1.BackupS3Object - (*BackupJob)(nil), // 36: backup.v1.BackupJob - (*CreateBackupJobResponse)(nil), // 37: backup.v1.CreateBackupJobResponse - (*ListBackupJobsRequest)(nil), // 38: backup.v1.ListBackupJobsRequest - (*ListBackupJobsResponse)(nil), // 39: backup.v1.ListBackupJobsResponse - (*GetBackupJobRequest)(nil), // 40: backup.v1.GetBackupJobRequest - (*GetBackupJobResponse)(nil), // 41: backup.v1.GetBackupJobResponse -} -var file_proto_backup_v1_backup_proto_depIdxs = []int32{ - 2, // 0: backup.v1.BackupConfig.postgres:type_name -> backup.v1.SourceConfig - 2, // 1: backup.v1.BackupConfig.redis:type_name -> backup.v1.SourceConfig - 3, // 2: backup.v1.BackupConfig.s3:type_name -> backup.v1.S3Config - 4, // 3: backup.v1.GetConfigResponse.config:type_name -> backup.v1.BackupConfig - 4, // 4: backup.v1.UpdateConfigRequest.config:type_name -> backup.v1.BackupConfig - 4, // 5: 
backup.v1.UpdateConfigResponse.config:type_name -> backup.v1.BackupConfig - 2, // 6: backup.v1.SourceProfile.config:type_name -> backup.v1.SourceConfig - 9, // 7: backup.v1.ListSourceProfilesResponse.items:type_name -> backup.v1.SourceProfile - 2, // 8: backup.v1.CreateSourceProfileRequest.config:type_name -> backup.v1.SourceConfig - 9, // 9: backup.v1.CreateSourceProfileResponse.profile:type_name -> backup.v1.SourceProfile - 2, // 10: backup.v1.UpdateSourceProfileRequest.config:type_name -> backup.v1.SourceConfig - 9, // 11: backup.v1.UpdateSourceProfileResponse.profile:type_name -> backup.v1.SourceProfile - 9, // 12: backup.v1.SetActiveSourceProfileResponse.profile:type_name -> backup.v1.SourceProfile - 3, // 13: backup.v1.ValidateS3Request.s3:type_name -> backup.v1.S3Config - 3, // 14: backup.v1.S3Profile.s3:type_name -> backup.v1.S3Config - 22, // 15: backup.v1.ListS3ProfilesResponse.items:type_name -> backup.v1.S3Profile - 3, // 16: backup.v1.CreateS3ProfileRequest.s3:type_name -> backup.v1.S3Config - 22, // 17: backup.v1.CreateS3ProfileResponse.profile:type_name -> backup.v1.S3Profile - 3, // 18: backup.v1.UpdateS3ProfileRequest.s3:type_name -> backup.v1.S3Config - 22, // 19: backup.v1.UpdateS3ProfileResponse.profile:type_name -> backup.v1.S3Profile - 22, // 20: backup.v1.SetActiveS3ProfileResponse.profile:type_name -> backup.v1.S3Profile - 34, // 21: backup.v1.BackupJob.artifact:type_name -> backup.v1.BackupArtifact - 35, // 22: backup.v1.BackupJob.s3_object:type_name -> backup.v1.BackupS3Object - 36, // 23: backup.v1.CreateBackupJobResponse.job:type_name -> backup.v1.BackupJob - 36, // 24: backup.v1.ListBackupJobsResponse.items:type_name -> backup.v1.BackupJob - 36, // 25: backup.v1.GetBackupJobResponse.job:type_name -> backup.v1.BackupJob - 0, // 26: backup.v1.BackupService.Health:input_type -> backup.v1.HealthRequest - 5, // 27: backup.v1.BackupService.GetConfig:input_type -> backup.v1.GetConfigRequest - 7, // 28: 
backup.v1.BackupService.UpdateConfig:input_type -> backup.v1.UpdateConfigRequest - 10, // 29: backup.v1.BackupService.ListSourceProfiles:input_type -> backup.v1.ListSourceProfilesRequest - 12, // 30: backup.v1.BackupService.CreateSourceProfile:input_type -> backup.v1.CreateSourceProfileRequest - 14, // 31: backup.v1.BackupService.UpdateSourceProfile:input_type -> backup.v1.UpdateSourceProfileRequest - 16, // 32: backup.v1.BackupService.DeleteSourceProfile:input_type -> backup.v1.DeleteSourceProfileRequest - 18, // 33: backup.v1.BackupService.SetActiveSourceProfile:input_type -> backup.v1.SetActiveSourceProfileRequest - 20, // 34: backup.v1.BackupService.ValidateS3:input_type -> backup.v1.ValidateS3Request - 23, // 35: backup.v1.BackupService.ListS3Profiles:input_type -> backup.v1.ListS3ProfilesRequest - 25, // 36: backup.v1.BackupService.CreateS3Profile:input_type -> backup.v1.CreateS3ProfileRequest - 27, // 37: backup.v1.BackupService.UpdateS3Profile:input_type -> backup.v1.UpdateS3ProfileRequest - 29, // 38: backup.v1.BackupService.DeleteS3Profile:input_type -> backup.v1.DeleteS3ProfileRequest - 31, // 39: backup.v1.BackupService.SetActiveS3Profile:input_type -> backup.v1.SetActiveS3ProfileRequest - 33, // 40: backup.v1.BackupService.CreateBackupJob:input_type -> backup.v1.CreateBackupJobRequest - 38, // 41: backup.v1.BackupService.ListBackupJobs:input_type -> backup.v1.ListBackupJobsRequest - 40, // 42: backup.v1.BackupService.GetBackupJob:input_type -> backup.v1.GetBackupJobRequest - 1, // 43: backup.v1.BackupService.Health:output_type -> backup.v1.HealthResponse - 6, // 44: backup.v1.BackupService.GetConfig:output_type -> backup.v1.GetConfigResponse - 8, // 45: backup.v1.BackupService.UpdateConfig:output_type -> backup.v1.UpdateConfigResponse - 11, // 46: backup.v1.BackupService.ListSourceProfiles:output_type -> backup.v1.ListSourceProfilesResponse - 13, // 47: backup.v1.BackupService.CreateSourceProfile:output_type -> backup.v1.CreateSourceProfileResponse - 
15, // 48: backup.v1.BackupService.UpdateSourceProfile:output_type -> backup.v1.UpdateSourceProfileResponse - 17, // 49: backup.v1.BackupService.DeleteSourceProfile:output_type -> backup.v1.DeleteSourceProfileResponse - 19, // 50: backup.v1.BackupService.SetActiveSourceProfile:output_type -> backup.v1.SetActiveSourceProfileResponse - 21, // 51: backup.v1.BackupService.ValidateS3:output_type -> backup.v1.ValidateS3Response - 24, // 52: backup.v1.BackupService.ListS3Profiles:output_type -> backup.v1.ListS3ProfilesResponse - 26, // 53: backup.v1.BackupService.CreateS3Profile:output_type -> backup.v1.CreateS3ProfileResponse - 28, // 54: backup.v1.BackupService.UpdateS3Profile:output_type -> backup.v1.UpdateS3ProfileResponse - 30, // 55: backup.v1.BackupService.DeleteS3Profile:output_type -> backup.v1.DeleteS3ProfileResponse - 32, // 56: backup.v1.BackupService.SetActiveS3Profile:output_type -> backup.v1.SetActiveS3ProfileResponse - 37, // 57: backup.v1.BackupService.CreateBackupJob:output_type -> backup.v1.CreateBackupJobResponse - 39, // 58: backup.v1.BackupService.ListBackupJobs:output_type -> backup.v1.ListBackupJobsResponse - 41, // 59: backup.v1.BackupService.GetBackupJob:output_type -> backup.v1.GetBackupJobResponse - 43, // [43:60] is the sub-list for method output_type - 26, // [26:43] is the sub-list for method input_type - 26, // [26:26] is the sub-list for extension type_name - 26, // [26:26] is the sub-list for extension extendee - 0, // [0:26] is the sub-list for field type_name -} - -func init() { file_proto_backup_v1_backup_proto_init() } -func file_proto_backup_v1_backup_proto_init() { - if File_proto_backup_v1_backup_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_proto_backup_v1_backup_proto_rawDesc), len(file_proto_backup_v1_backup_proto_rawDesc)), - NumEnums: 0, - NumMessages: 42, - 
NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_proto_backup_v1_backup_proto_goTypes, - DependencyIndexes: file_proto_backup_v1_backup_proto_depIdxs, - MessageInfos: file_proto_backup_v1_backup_proto_msgTypes, - }.Build() - File_proto_backup_v1_backup_proto = out.File - file_proto_backup_v1_backup_proto_goTypes = nil - file_proto_backup_v1_backup_proto_depIdxs = nil -} diff --git a/backup/proto/backup/v1/backup.proto b/backup/proto/backup/v1/backup.proto deleted file mode 100644 index 8faa51d14..000000000 --- a/backup/proto/backup/v1/backup.proto +++ /dev/null @@ -1,267 +0,0 @@ -syntax = "proto3"; - -package backup.v1; - -option go_package = "github.com/Wei-Shaw/sub2api/backup/proto/backup/v1;backupv1"; - -service BackupService { - rpc Health(HealthRequest) returns (HealthResponse); - rpc GetConfig(GetConfigRequest) returns (GetConfigResponse); - rpc UpdateConfig(UpdateConfigRequest) returns (UpdateConfigResponse); - rpc ListSourceProfiles(ListSourceProfilesRequest) returns (ListSourceProfilesResponse); - rpc CreateSourceProfile(CreateSourceProfileRequest) returns (CreateSourceProfileResponse); - rpc UpdateSourceProfile(UpdateSourceProfileRequest) returns (UpdateSourceProfileResponse); - rpc DeleteSourceProfile(DeleteSourceProfileRequest) returns (DeleteSourceProfileResponse); - rpc SetActiveSourceProfile(SetActiveSourceProfileRequest) returns (SetActiveSourceProfileResponse); - rpc ValidateS3(ValidateS3Request) returns (ValidateS3Response); - rpc ListS3Profiles(ListS3ProfilesRequest) returns (ListS3ProfilesResponse); - rpc CreateS3Profile(CreateS3ProfileRequest) returns (CreateS3ProfileResponse); - rpc UpdateS3Profile(UpdateS3ProfileRequest) returns (UpdateS3ProfileResponse); - rpc DeleteS3Profile(DeleteS3ProfileRequest) returns (DeleteS3ProfileResponse); - rpc SetActiveS3Profile(SetActiveS3ProfileRequest) returns (SetActiveS3ProfileResponse); - rpc CreateBackupJob(CreateBackupJobRequest) returns (CreateBackupJobResponse); - rpc 
ListBackupJobs(ListBackupJobsRequest) returns (ListBackupJobsResponse); - rpc GetBackupJob(GetBackupJobRequest) returns (GetBackupJobResponse); -} - -message HealthRequest {} - -message HealthResponse { - string status = 1; - string version = 2; - int64 uptime_seconds = 3; -} - -message SourceConfig { - string host = 1; - int32 port = 2; - string user = 3; - string password = 4; - string database = 5; - string ssl_mode = 6; - string addr = 7; - string username = 8; - int32 db = 9; - string container_name = 10; -} - -message S3Config { - bool enabled = 1; - string endpoint = 2; - string region = 3; - string bucket = 4; - string access_key_id = 5; - string secret_access_key = 6; - string prefix = 7; - bool force_path_style = 8; - bool use_ssl = 9; -} - -message BackupConfig { - string source_mode = 1; - string backup_root = 2; - string sqlite_path = 3; - int32 retention_days = 4; - int32 keep_last = 5; - SourceConfig postgres = 6; - SourceConfig redis = 7; - S3Config s3 = 8; - string active_s3_profile_id = 9; - string active_postgres_profile_id = 10; - string active_redis_profile_id = 11; -} - -message GetConfigRequest {} - -message GetConfigResponse { - BackupConfig config = 1; -} - -message UpdateConfigRequest { - BackupConfig config = 1; -} - -message UpdateConfigResponse { - BackupConfig config = 1; -} - -message SourceProfile { - string source_type = 1; - string profile_id = 2; - string name = 3; - bool is_active = 4; - SourceConfig config = 5; - bool password_configured = 6; - string created_at = 7; - string updated_at = 8; -} - -message ListSourceProfilesRequest { - string source_type = 1; -} - -message ListSourceProfilesResponse { - repeated SourceProfile items = 1; -} - -message CreateSourceProfileRequest { - string source_type = 1; - string profile_id = 2; - string name = 3; - SourceConfig config = 4; - bool set_active = 5; -} - -message CreateSourceProfileResponse { - SourceProfile profile = 1; -} - -message UpdateSourceProfileRequest { - string 
source_type = 1; - string profile_id = 2; - string name = 3; - SourceConfig config = 4; -} - -message UpdateSourceProfileResponse { - SourceProfile profile = 1; -} - -message DeleteSourceProfileRequest { - string source_type = 1; - string profile_id = 2; -} - -message DeleteSourceProfileResponse {} - -message SetActiveSourceProfileRequest { - string source_type = 1; - string profile_id = 2; -} - -message SetActiveSourceProfileResponse { - SourceProfile profile = 1; -} - -message ValidateS3Request { - S3Config s3 = 1; -} - -message ValidateS3Response { - bool ok = 1; - string message = 2; -} - -message S3Profile { - string profile_id = 1; - string name = 2; - bool is_active = 3; - S3Config s3 = 4; - bool secret_access_key_configured = 5; - string created_at = 6; - string updated_at = 7; -} - -message ListS3ProfilesRequest {} - -message ListS3ProfilesResponse { - repeated S3Profile items = 1; -} - -message CreateS3ProfileRequest { - string profile_id = 1; - string name = 2; - S3Config s3 = 3; - bool set_active = 4; -} - -message CreateS3ProfileResponse { - S3Profile profile = 1; -} - -message UpdateS3ProfileRequest { - string profile_id = 1; - string name = 2; - S3Config s3 = 3; -} - -message UpdateS3ProfileResponse { - S3Profile profile = 1; -} - -message DeleteS3ProfileRequest { - string profile_id = 1; -} - -message DeleteS3ProfileResponse {} - -message SetActiveS3ProfileRequest { - string profile_id = 1; -} - -message SetActiveS3ProfileResponse { - S3Profile profile = 1; -} - -message CreateBackupJobRequest { - string backup_type = 1; - bool upload_to_s3 = 2; - string triggered_by = 3; - string idempotency_key = 4; - string s3_profile_id = 5; - string postgres_profile_id = 6; - string redis_profile_id = 7; -} - -message BackupArtifact { - string local_path = 1; - int64 size_bytes = 2; - string sha256 = 3; -} - -message BackupS3Object { - string bucket = 1; - string key = 2; - string etag = 3; -} - -message BackupJob { - string job_id = 1; - string backup_type = 
2; - string status = 3; - string triggered_by = 4; - string idempotency_key = 5; - bool upload_to_s3 = 6; - string started_at = 7; - string finished_at = 8; - string error_message = 9; - BackupArtifact artifact = 10; - BackupS3Object s3_object = 11; - string s3_profile_id = 12; - string postgres_profile_id = 13; - string redis_profile_id = 14; -} - -message CreateBackupJobResponse { - BackupJob job = 1; -} - -message ListBackupJobsRequest { - int32 page_size = 1; - string page_token = 2; - string status = 3; - string backup_type = 4; -} - -message ListBackupJobsResponse { - repeated BackupJob items = 1; - string next_page_token = 2; -} - -message GetBackupJobRequest { - string job_id = 1; -} - -message GetBackupJobResponse { - BackupJob job = 1; -} diff --git a/backup/proto/backup/v1/backup_grpc.pb.go b/backup/proto/backup/v1/backup_grpc.pb.go deleted file mode 100644 index c4731e1a1..000000000 --- a/backup/proto/backup/v1/backup_grpc.pb.go +++ /dev/null @@ -1,729 +0,0 @@ -// Code generated by protoc-gen-go-grpc. DO NOT EDIT. -// versions: -// - protoc-gen-go-grpc v1.6.0 -// - protoc v6.32.1 -// source: proto/backup/v1/backup.proto - -package backupv1 - -import ( - context "context" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" -) - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -// Requires gRPC-Go v1.64.0 or later. 
-const _ = grpc.SupportPackageIsVersion9 - -const ( - BackupService_Health_FullMethodName = "/backup.v1.BackupService/Health" - BackupService_GetConfig_FullMethodName = "/backup.v1.BackupService/GetConfig" - BackupService_UpdateConfig_FullMethodName = "/backup.v1.BackupService/UpdateConfig" - BackupService_ListSourceProfiles_FullMethodName = "/backup.v1.BackupService/ListSourceProfiles" - BackupService_CreateSourceProfile_FullMethodName = "/backup.v1.BackupService/CreateSourceProfile" - BackupService_UpdateSourceProfile_FullMethodName = "/backup.v1.BackupService/UpdateSourceProfile" - BackupService_DeleteSourceProfile_FullMethodName = "/backup.v1.BackupService/DeleteSourceProfile" - BackupService_SetActiveSourceProfile_FullMethodName = "/backup.v1.BackupService/SetActiveSourceProfile" - BackupService_ValidateS3_FullMethodName = "/backup.v1.BackupService/ValidateS3" - BackupService_ListS3Profiles_FullMethodName = "/backup.v1.BackupService/ListS3Profiles" - BackupService_CreateS3Profile_FullMethodName = "/backup.v1.BackupService/CreateS3Profile" - BackupService_UpdateS3Profile_FullMethodName = "/backup.v1.BackupService/UpdateS3Profile" - BackupService_DeleteS3Profile_FullMethodName = "/backup.v1.BackupService/DeleteS3Profile" - BackupService_SetActiveS3Profile_FullMethodName = "/backup.v1.BackupService/SetActiveS3Profile" - BackupService_CreateBackupJob_FullMethodName = "/backup.v1.BackupService/CreateBackupJob" - BackupService_ListBackupJobs_FullMethodName = "/backup.v1.BackupService/ListBackupJobs" - BackupService_GetBackupJob_FullMethodName = "/backup.v1.BackupService/GetBackupJob" -) - -// BackupServiceClient is the client API for BackupService service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. 
-type BackupServiceClient interface { - Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) - GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) - UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) - ListSourceProfiles(ctx context.Context, in *ListSourceProfilesRequest, opts ...grpc.CallOption) (*ListSourceProfilesResponse, error) - CreateSourceProfile(ctx context.Context, in *CreateSourceProfileRequest, opts ...grpc.CallOption) (*CreateSourceProfileResponse, error) - UpdateSourceProfile(ctx context.Context, in *UpdateSourceProfileRequest, opts ...grpc.CallOption) (*UpdateSourceProfileResponse, error) - DeleteSourceProfile(ctx context.Context, in *DeleteSourceProfileRequest, opts ...grpc.CallOption) (*DeleteSourceProfileResponse, error) - SetActiveSourceProfile(ctx context.Context, in *SetActiveSourceProfileRequest, opts ...grpc.CallOption) (*SetActiveSourceProfileResponse, error) - ValidateS3(ctx context.Context, in *ValidateS3Request, opts ...grpc.CallOption) (*ValidateS3Response, error) - ListS3Profiles(ctx context.Context, in *ListS3ProfilesRequest, opts ...grpc.CallOption) (*ListS3ProfilesResponse, error) - CreateS3Profile(ctx context.Context, in *CreateS3ProfileRequest, opts ...grpc.CallOption) (*CreateS3ProfileResponse, error) - UpdateS3Profile(ctx context.Context, in *UpdateS3ProfileRequest, opts ...grpc.CallOption) (*UpdateS3ProfileResponse, error) - DeleteS3Profile(ctx context.Context, in *DeleteS3ProfileRequest, opts ...grpc.CallOption) (*DeleteS3ProfileResponse, error) - SetActiveS3Profile(ctx context.Context, in *SetActiveS3ProfileRequest, opts ...grpc.CallOption) (*SetActiveS3ProfileResponse, error) - CreateBackupJob(ctx context.Context, in *CreateBackupJobRequest, opts ...grpc.CallOption) (*CreateBackupJobResponse, error) - ListBackupJobs(ctx context.Context, in *ListBackupJobsRequest, 
opts ...grpc.CallOption) (*ListBackupJobsResponse, error) - GetBackupJob(ctx context.Context, in *GetBackupJobRequest, opts ...grpc.CallOption) (*GetBackupJobResponse, error) -} - -type backupServiceClient struct { - cc grpc.ClientConnInterface -} - -func NewBackupServiceClient(cc grpc.ClientConnInterface) BackupServiceClient { - return &backupServiceClient{cc} -} - -func (c *backupServiceClient) Health(ctx context.Context, in *HealthRequest, opts ...grpc.CallOption) (*HealthResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(HealthResponse) - err := c.cc.Invoke(ctx, BackupService_Health_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) GetConfig(ctx context.Context, in *GetConfigRequest, opts ...grpc.CallOption) (*GetConfigResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetConfigResponse) - err := c.cc.Invoke(ctx, BackupService_GetConfig_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) UpdateConfig(ctx context.Context, in *UpdateConfigRequest, opts ...grpc.CallOption) (*UpdateConfigResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(UpdateConfigResponse) - err := c.cc.Invoke(ctx, BackupService_UpdateConfig_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) ListSourceProfiles(ctx context.Context, in *ListSourceProfilesRequest, opts ...grpc.CallOption) (*ListSourceProfilesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ListSourceProfilesResponse) - err := c.cc.Invoke(ctx, BackupService_ListSourceProfiles_FullMethodName, in, out, cOpts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) CreateSourceProfile(ctx context.Context, in *CreateSourceProfileRequest, opts ...grpc.CallOption) (*CreateSourceProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(CreateSourceProfileResponse) - err := c.cc.Invoke(ctx, BackupService_CreateSourceProfile_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) UpdateSourceProfile(ctx context.Context, in *UpdateSourceProfileRequest, opts ...grpc.CallOption) (*UpdateSourceProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(UpdateSourceProfileResponse) - err := c.cc.Invoke(ctx, BackupService_UpdateSourceProfile_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) DeleteSourceProfile(ctx context.Context, in *DeleteSourceProfileRequest, opts ...grpc.CallOption) (*DeleteSourceProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(DeleteSourceProfileResponse) - err := c.cc.Invoke(ctx, BackupService_DeleteSourceProfile_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) SetActiveSourceProfile(ctx context.Context, in *SetActiveSourceProfileRequest, opts ...grpc.CallOption) (*SetActiveSourceProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(SetActiveSourceProfileResponse) - err := c.cc.Invoke(ctx, BackupService_SetActiveSourceProfile_FullMethodName, in, out, cOpts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) ValidateS3(ctx context.Context, in *ValidateS3Request, opts ...grpc.CallOption) (*ValidateS3Response, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ValidateS3Response) - err := c.cc.Invoke(ctx, BackupService_ValidateS3_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) ListS3Profiles(ctx context.Context, in *ListS3ProfilesRequest, opts ...grpc.CallOption) (*ListS3ProfilesResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ListS3ProfilesResponse) - err := c.cc.Invoke(ctx, BackupService_ListS3Profiles_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) CreateS3Profile(ctx context.Context, in *CreateS3ProfileRequest, opts ...grpc.CallOption) (*CreateS3ProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(CreateS3ProfileResponse) - err := c.cc.Invoke(ctx, BackupService_CreateS3Profile_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) UpdateS3Profile(ctx context.Context, in *UpdateS3ProfileRequest, opts ...grpc.CallOption) (*UpdateS3ProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(UpdateS3ProfileResponse) - err := c.cc.Invoke(ctx, BackupService_UpdateS3Profile_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) DeleteS3Profile(ctx context.Context, in *DeleteS3ProfileRequest, opts ...grpc.CallOption) (*DeleteS3ProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) 
- out := new(DeleteS3ProfileResponse) - err := c.cc.Invoke(ctx, BackupService_DeleteS3Profile_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) SetActiveS3Profile(ctx context.Context, in *SetActiveS3ProfileRequest, opts ...grpc.CallOption) (*SetActiveS3ProfileResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(SetActiveS3ProfileResponse) - err := c.cc.Invoke(ctx, BackupService_SetActiveS3Profile_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) CreateBackupJob(ctx context.Context, in *CreateBackupJobRequest, opts ...grpc.CallOption) (*CreateBackupJobResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(CreateBackupJobResponse) - err := c.cc.Invoke(ctx, BackupService_CreateBackupJob_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) ListBackupJobs(ctx context.Context, in *ListBackupJobsRequest, opts ...grpc.CallOption) (*ListBackupJobsResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(ListBackupJobsResponse) - err := c.cc.Invoke(ctx, BackupService_ListBackupJobs_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *backupServiceClient) GetBackupJob(ctx context.Context, in *GetBackupJobRequest, opts ...grpc.CallOption) (*GetBackupJobResponse, error) { - cOpts := append([]grpc.CallOption{grpc.StaticMethod()}, opts...) - out := new(GetBackupJobResponse) - err := c.cc.Invoke(ctx, BackupService_GetBackupJob_FullMethodName, in, out, cOpts...) - if err != nil { - return nil, err - } - return out, nil -} - -// BackupServiceServer is the server API for BackupService service. 
-// All implementations must embed UnimplementedBackupServiceServer -// for forward compatibility. -type BackupServiceServer interface { - Health(context.Context, *HealthRequest) (*HealthResponse, error) - GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) - UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) - ListSourceProfiles(context.Context, *ListSourceProfilesRequest) (*ListSourceProfilesResponse, error) - CreateSourceProfile(context.Context, *CreateSourceProfileRequest) (*CreateSourceProfileResponse, error) - UpdateSourceProfile(context.Context, *UpdateSourceProfileRequest) (*UpdateSourceProfileResponse, error) - DeleteSourceProfile(context.Context, *DeleteSourceProfileRequest) (*DeleteSourceProfileResponse, error) - SetActiveSourceProfile(context.Context, *SetActiveSourceProfileRequest) (*SetActiveSourceProfileResponse, error) - ValidateS3(context.Context, *ValidateS3Request) (*ValidateS3Response, error) - ListS3Profiles(context.Context, *ListS3ProfilesRequest) (*ListS3ProfilesResponse, error) - CreateS3Profile(context.Context, *CreateS3ProfileRequest) (*CreateS3ProfileResponse, error) - UpdateS3Profile(context.Context, *UpdateS3ProfileRequest) (*UpdateS3ProfileResponse, error) - DeleteS3Profile(context.Context, *DeleteS3ProfileRequest) (*DeleteS3ProfileResponse, error) - SetActiveS3Profile(context.Context, *SetActiveS3ProfileRequest) (*SetActiveS3ProfileResponse, error) - CreateBackupJob(context.Context, *CreateBackupJobRequest) (*CreateBackupJobResponse, error) - ListBackupJobs(context.Context, *ListBackupJobsRequest) (*ListBackupJobsResponse, error) - GetBackupJob(context.Context, *GetBackupJobRequest) (*GetBackupJobResponse, error) - mustEmbedUnimplementedBackupServiceServer() -} - -// UnimplementedBackupServiceServer must be embedded to have -// forward compatible implementations. 
-// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedBackupServiceServer struct{} - -func (UnimplementedBackupServiceServer) Health(context.Context, *HealthRequest) (*HealthResponse, error) { - return nil, status.Error(codes.Unimplemented, "method Health not implemented") -} -func (UnimplementedBackupServiceServer) GetConfig(context.Context, *GetConfigRequest) (*GetConfigResponse, error) { - return nil, status.Error(codes.Unimplemented, "method GetConfig not implemented") -} -func (UnimplementedBackupServiceServer) UpdateConfig(context.Context, *UpdateConfigRequest) (*UpdateConfigResponse, error) { - return nil, status.Error(codes.Unimplemented, "method UpdateConfig not implemented") -} -func (UnimplementedBackupServiceServer) ListSourceProfiles(context.Context, *ListSourceProfilesRequest) (*ListSourceProfilesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ListSourceProfiles not implemented") -} -func (UnimplementedBackupServiceServer) CreateSourceProfile(context.Context, *CreateSourceProfileRequest) (*CreateSourceProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method CreateSourceProfile not implemented") -} -func (UnimplementedBackupServiceServer) UpdateSourceProfile(context.Context, *UpdateSourceProfileRequest) (*UpdateSourceProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method UpdateSourceProfile not implemented") -} -func (UnimplementedBackupServiceServer) DeleteSourceProfile(context.Context, *DeleteSourceProfileRequest) (*DeleteSourceProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DeleteSourceProfile not implemented") -} -func (UnimplementedBackupServiceServer) SetActiveSourceProfile(context.Context, *SetActiveSourceProfileRequest) (*SetActiveSourceProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetActiveSourceProfile 
not implemented") -} -func (UnimplementedBackupServiceServer) ValidateS3(context.Context, *ValidateS3Request) (*ValidateS3Response, error) { - return nil, status.Error(codes.Unimplemented, "method ValidateS3 not implemented") -} -func (UnimplementedBackupServiceServer) ListS3Profiles(context.Context, *ListS3ProfilesRequest) (*ListS3ProfilesResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ListS3Profiles not implemented") -} -func (UnimplementedBackupServiceServer) CreateS3Profile(context.Context, *CreateS3ProfileRequest) (*CreateS3ProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method CreateS3Profile not implemented") -} -func (UnimplementedBackupServiceServer) UpdateS3Profile(context.Context, *UpdateS3ProfileRequest) (*UpdateS3ProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method UpdateS3Profile not implemented") -} -func (UnimplementedBackupServiceServer) DeleteS3Profile(context.Context, *DeleteS3ProfileRequest) (*DeleteS3ProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method DeleteS3Profile not implemented") -} -func (UnimplementedBackupServiceServer) SetActiveS3Profile(context.Context, *SetActiveS3ProfileRequest) (*SetActiveS3ProfileResponse, error) { - return nil, status.Error(codes.Unimplemented, "method SetActiveS3Profile not implemented") -} -func (UnimplementedBackupServiceServer) CreateBackupJob(context.Context, *CreateBackupJobRequest) (*CreateBackupJobResponse, error) { - return nil, status.Error(codes.Unimplemented, "method CreateBackupJob not implemented") -} -func (UnimplementedBackupServiceServer) ListBackupJobs(context.Context, *ListBackupJobsRequest) (*ListBackupJobsResponse, error) { - return nil, status.Error(codes.Unimplemented, "method ListBackupJobs not implemented") -} -func (UnimplementedBackupServiceServer) GetBackupJob(context.Context, *GetBackupJobRequest) (*GetBackupJobResponse, error) { - return nil, 
status.Error(codes.Unimplemented, "method GetBackupJob not implemented") -} -func (UnimplementedBackupServiceServer) mustEmbedUnimplementedBackupServiceServer() {} -func (UnimplementedBackupServiceServer) testEmbeddedByValue() {} - -// UnsafeBackupServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to BackupServiceServer will -// result in compilation errors. -type UnsafeBackupServiceServer interface { - mustEmbedUnimplementedBackupServiceServer() -} - -func RegisterBackupServiceServer(s grpc.ServiceRegistrar, srv BackupServiceServer) { - // If the following call panics, it indicates UnimplementedBackupServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - s.RegisterService(&BackupService_ServiceDesc, srv) -} - -func _BackupService_Health_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(HealthRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).Health(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_Health_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).Health(ctx, req.(*HealthRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_GetConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil 
{ - return srv.(BackupServiceServer).GetConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_GetConfig_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).GetConfig(ctx, req.(*GetConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_UpdateConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateConfigRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).UpdateConfig(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_UpdateConfig_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).UpdateConfig(ctx, req.(*UpdateConfigRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_ListSourceProfiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListSourceProfilesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).ListSourceProfiles(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_ListSourceProfiles_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).ListSourceProfiles(ctx, req.(*ListSourceProfilesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_CreateSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateSourceProfileRequest) - if err := dec(in); err 
!= nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).CreateSourceProfile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_CreateSourceProfile_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).CreateSourceProfile(ctx, req.(*CreateSourceProfileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_UpdateSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateSourceProfileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).UpdateSourceProfile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_UpdateSourceProfile_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).UpdateSourceProfile(ctx, req.(*UpdateSourceProfileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_DeleteSourceProfile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteSourceProfileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).DeleteSourceProfile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_DeleteSourceProfile_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).DeleteSourceProfile(ctx, req.(*DeleteSourceProfileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_SetActiveSourceProfile_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetActiveSourceProfileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).SetActiveSourceProfile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_SetActiveSourceProfile_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).SetActiveSourceProfile(ctx, req.(*SetActiveSourceProfileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_ValidateS3_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ValidateS3Request) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).ValidateS3(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_ValidateS3_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).ValidateS3(ctx, req.(*ValidateS3Request)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_ListS3Profiles_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListS3ProfilesRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).ListS3Profiles(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_ListS3Profiles_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).ListS3Profiles(ctx, req.(*ListS3ProfilesRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func 
_BackupService_CreateS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateS3ProfileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).CreateS3Profile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_CreateS3Profile_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).CreateS3Profile(ctx, req.(*CreateS3ProfileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_UpdateS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateS3ProfileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).UpdateS3Profile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_UpdateS3Profile_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).UpdateS3Profile(ctx, req.(*UpdateS3ProfileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_DeleteS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteS3ProfileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).DeleteS3Profile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_DeleteS3Profile_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).DeleteS3Profile(ctx, 
req.(*DeleteS3ProfileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_SetActiveS3Profile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SetActiveS3ProfileRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).SetActiveS3Profile(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_SetActiveS3Profile_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).SetActiveS3Profile(ctx, req.(*SetActiveS3ProfileRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_CreateBackupJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateBackupJobRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).CreateBackupJob(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_CreateBackupJob_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).CreateBackupJob(ctx, req.(*CreateBackupJobRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_ListBackupJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListBackupJobsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).ListBackupJobs(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_ListBackupJobs_FullMethodName, - } - handler := func(ctx context.Context, req 
interface{}) (interface{}, error) { - return srv.(BackupServiceServer).ListBackupJobs(ctx, req.(*ListBackupJobsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _BackupService_GetBackupJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetBackupJobRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BackupServiceServer).GetBackupJob(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: BackupService_GetBackupJob_FullMethodName, - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BackupServiceServer).GetBackupJob(ctx, req.(*GetBackupJobRequest)) - } - return interceptor(ctx, in, info, handler) -} - -// BackupService_ServiceDesc is the grpc.ServiceDesc for BackupService service. -// It's only intended for direct use with grpc.RegisterService, -// and not to be introspected or modified (even as a copy) -var BackupService_ServiceDesc = grpc.ServiceDesc{ - ServiceName: "backup.v1.BackupService", - HandlerType: (*BackupServiceServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Health", - Handler: _BackupService_Health_Handler, - }, - { - MethodName: "GetConfig", - Handler: _BackupService_GetConfig_Handler, - }, - { - MethodName: "UpdateConfig", - Handler: _BackupService_UpdateConfig_Handler, - }, - { - MethodName: "ListSourceProfiles", - Handler: _BackupService_ListSourceProfiles_Handler, - }, - { - MethodName: "CreateSourceProfile", - Handler: _BackupService_CreateSourceProfile_Handler, - }, - { - MethodName: "UpdateSourceProfile", - Handler: _BackupService_UpdateSourceProfile_Handler, - }, - { - MethodName: "DeleteSourceProfile", - Handler: _BackupService_DeleteSourceProfile_Handler, - }, - { - MethodName: "SetActiveSourceProfile", - Handler: _BackupService_SetActiveSourceProfile_Handler, - }, - { - MethodName: 
"ValidateS3", - Handler: _BackupService_ValidateS3_Handler, - }, - { - MethodName: "ListS3Profiles", - Handler: _BackupService_ListS3Profiles_Handler, - }, - { - MethodName: "CreateS3Profile", - Handler: _BackupService_CreateS3Profile_Handler, - }, - { - MethodName: "UpdateS3Profile", - Handler: _BackupService_UpdateS3Profile_Handler, - }, - { - MethodName: "DeleteS3Profile", - Handler: _BackupService_DeleteS3Profile_Handler, - }, - { - MethodName: "SetActiveS3Profile", - Handler: _BackupService_SetActiveS3Profile_Handler, - }, - { - MethodName: "CreateBackupJob", - Handler: _BackupService_CreateBackupJob_Handler, - }, - { - MethodName: "ListBackupJobs", - Handler: _BackupService_ListBackupJobs_Handler, - }, - { - MethodName: "GetBackupJob", - Handler: _BackupService_GetBackupJob_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "proto/backup/v1/backup.proto", -} diff --git a/deploy/BACKUPD_CN.md b/deploy/BACKUPD_CN.md deleted file mode 100644 index e023cc1a8..000000000 --- a/deploy/BACKUPD_CN.md +++ /dev/null @@ -1,78 +0,0 @@ -# backupd 部署说明(数据管理) - -本文说明如何在宿主机部署 `backupd`,并与主进程联动开启“数据管理”功能。 - -## 1. 关键约束 - -- 主进程固定探测路径:`/tmp/sub2api-backup.sock` -- 仅当该 Unix Socket 可连通且 `Health` 成功时,后台“数据管理”才会启用 -- `backupd` 使用 SQLite 持久化元数据,不依赖主库 - -## 2. 宿主机构建与运行 - -```bash -cd /opt/sub2api-src/backup -go build -o /opt/sub2api/backupd ./cmd/backupd - -mkdir -p /var/lib/sub2api/backup -chown -R sub2api:sub2api /var/lib/sub2api/backup -``` - -手动启动示例: - -```bash -/opt/sub2api/backupd \ - -socket-path /tmp/sub2api-backup.sock \ - -sqlite-path /var/lib/sub2api/backup/backupd.db \ - -version 1.0.0 -``` - -## 3. 
systemd 托管(推荐) - -仓库已提供示例服务文件:`deploy/sub2api-backupd.service` - -```bash -sudo cp deploy/sub2api-backupd.service /etc/systemd/system/ -sudo systemctl daemon-reload -sudo systemctl enable --now sub2api-backupd -sudo systemctl status sub2api-backupd -``` - -查看日志: - -```bash -sudo journalctl -u sub2api-backupd -f -``` - -也可以使用一键安装脚本(自动安装二进制 + 注册 systemd): - -```bash -# 方式一:使用现成二进制 -sudo ./deploy/install-backupd.sh --binary /path/to/backupd - -# 方式二:从源码构建后安装 -sudo ./deploy/install-backupd.sh --source /path/to/sub2api -``` - -## 4. Docker 部署联动 - -若 `sub2api` 运行在 Docker 容器中,需要将宿主机 Socket 挂载到容器同路径: - -```yaml -services: - sub2api: - volumes: - - /tmp/sub2api-backup.sock:/tmp/sub2api-backup.sock -``` - -建议在 `docker-compose.override.yml` 中维护该挂载,避免覆盖主 compose 文件。 - -## 5. 依赖检查 - -`backupd` 执行备份时依赖以下工具: - -- `pg_dump` -- `redis-cli` -- `docker`(仅 `source_mode=docker_exec` 时) - -缺失依赖会导致对应任务失败,并在任务详情中体现错误信息。 diff --git a/deploy/DATAMANAGEMENTD_CN.md b/deploy/DATAMANAGEMENTD_CN.md new file mode 100644 index 000000000..774f03aed --- /dev/null +++ b/deploy/DATAMANAGEMENTD_CN.md @@ -0,0 +1,78 @@ +# datamanagementd 部署说明(数据管理) + +本文说明如何在宿主机部署 `datamanagementd`,并与主进程联动开启“数据管理”功能。 + +## 1. 关键约束 + +- 主进程固定探测路径:`/tmp/sub2api-datamanagement.sock` +- 仅当该 Unix Socket 可连通且 `Health` 成功时,后台“数据管理”才会启用 +- `datamanagementd` 使用 SQLite 持久化元数据,不依赖主库 + +## 2. 宿主机构建与运行 + +```bash +cd /opt/sub2api-src/datamanagement +go build -o /opt/sub2api/datamanagementd ./cmd/datamanagementd + +mkdir -p /var/lib/sub2api/datamanagement +chown -R sub2api:sub2api /var/lib/sub2api/datamanagement +``` + +手动启动示例: + +```bash +/opt/sub2api/datamanagementd \ + -socket-path /tmp/sub2api-datamanagement.sock \ + -sqlite-path /var/lib/sub2api/datamanagement/datamanagementd.db \ + -version 1.0.0 +``` + +## 3. 
systemd 托管(推荐) + +仓库已提供示例服务文件:`deploy/sub2api-datamanagementd.service` + +```bash +sudo cp deploy/sub2api-datamanagementd.service /etc/systemd/system/ +sudo systemctl daemon-reload +sudo systemctl enable --now sub2api-datamanagementd +sudo systemctl status sub2api-datamanagementd +``` + +查看日志: + +```bash +sudo journalctl -u sub2api-datamanagementd -f +``` + +也可以使用一键安装脚本(自动安装二进制 + 注册 systemd): + +```bash +# 方式一:使用现成二进制 +sudo ./deploy/install-datamanagementd.sh --binary /path/to/datamanagementd + +# 方式二:从源码构建后安装 +sudo ./deploy/install-datamanagementd.sh --source /path/to/sub2api +``` + +## 4. Docker 部署联动 + +若 `sub2api` 运行在 Docker 容器中,需要将宿主机 Socket 挂载到容器同路径: + +```yaml +services: + sub2api: + volumes: + - /tmp/sub2api-datamanagement.sock:/tmp/sub2api-datamanagement.sock +``` + +建议在 `docker-compose.override.yml` 中维护该挂载,避免覆盖主 compose 文件。 + +## 5. 依赖检查 + +`datamanagementd` 执行备份时依赖以下工具: + +- `pg_dump` +- `redis-cli` +- `docker`(仅 `source_mode=docker_exec` 时) + +缺失依赖会导致对应任务失败,并在任务详情中体现错误信息。 diff --git a/deploy/README.md b/deploy/README.md index f5b53ec2f..807bf510c 100644 --- a/deploy/README.md +++ b/deploy/README.md @@ -19,10 +19,10 @@ This directory contains files for deploying Sub2API on Linux servers. 
| `.env.example` | Docker environment variables template | | `DOCKER.md` | Docker Hub documentation | | `install.sh` | One-click binary installation script | -| `install-backupd.sh` | backupd 一键安装脚本 | +| `install-datamanagementd.sh` | datamanagementd 一键安装脚本 | | `sub2api.service` | Systemd service unit file | -| `sub2api-backupd.service` | backupd systemd service unit file | -| `BACKUPD_CN.md` | backupd 部署与联动说明(中文) | +| `sub2api-datamanagementd.service` | datamanagementd systemd service unit file | +| `DATAMANAGEMENTD_CN.md` | datamanagementd 部署与联动说明(中文) | | `config.example.yaml` | Example configuration file | --- @@ -148,13 +148,13 @@ SELECT (SELECT COUNT(*) FROM user_allowed_groups) AS new_pair_count; ``` -### backupd(数据管理)联动 +### datamanagementd(数据管理)联动 -如需启用管理后台“数据管理”功能,请额外部署宿主机 `backupd`: +如需启用管理后台“数据管理”功能,请额外部署宿主机 `datamanagementd`: -- 主进程固定探测 `/tmp/sub2api-backup.sock` +- 主进程固定探测 `/tmp/sub2api-datamanagement.sock` - Docker 场景下需把宿主机 Socket 挂载到容器内同路径 -- 详细步骤见:`deploy/BACKUPD_CN.md` +- 详细步骤见:`deploy/DATAMANAGEMENTD_CN.md` ### Commands @@ -586,7 +586,7 @@ gateway: name: "Profile 2" cipher_suites: [4866, 4867, 4865, 49199, 49195, 49200, 49196] curves: [29, 23, 24] - point_formats: [0] + point_formats: 0 # Another custom profile profile_3: diff --git a/deploy/docker-compose.override.yml.example b/deploy/docker-compose.override.yml.example index a7350471b..7157f212a 100644 --- a/deploy/docker-compose.override.yml.example +++ b/deploy/docker-compose.override.yml.example @@ -128,17 +128,17 @@ services: # - ./backups:/app/backups # ============================================================================= -# Scenario 6: 启用宿主机 backupd(数据管理) +# Scenario 6: 启用宿主机 datamanagementd(数据管理) # ============================================================================= # 说明: -# - backupd 运行在宿主机(systemd 或手动) -# - 主进程固定探测 /tmp/sub2api-backup.sock +# - datamanagementd 运行在宿主机(systemd 或手动) +# - 主进程固定探测 /tmp/sub2api-datamanagement.sock # - 需要把宿主机 socket 挂载到容器内同路径 # # services: # 
sub2api: # volumes: -# - /tmp/sub2api-backup.sock:/tmp/sub2api-backup.sock +# - /tmp/sub2api-datamanagement.sock:/tmp/sub2api-datamanagement.sock # ============================================================================= # Additional Notes diff --git a/deploy/install-backupd.sh b/deploy/install-datamanagementd.sh similarity index 62% rename from deploy/install-backupd.sh rename to deploy/install-datamanagementd.sh index 340ee396f..8d53134bf 100755 --- a/deploy/install-backupd.sh +++ b/deploy/install-datamanagementd.sh @@ -3,29 +3,29 @@ set -euo pipefail # 用法: -# sudo ./install-backupd.sh --binary /path/to/backupd +# sudo ./install-datamanagementd.sh --binary /path/to/datamanagementd # 或: -# sudo ./install-backupd.sh --source /path/to/sub2api/repo +# sudo ./install-datamanagementd.sh --source /path/to/sub2api/repo BIN_PATH="" SOURCE_PATH="" INSTALL_DIR="/opt/sub2api" -DATA_DIR="/var/lib/sub2api/backup" -SERVICE_FILE_NAME="sub2api-backupd.service" +DATA_DIR="/var/lib/sub2api/datamanagement" +SERVICE_FILE_NAME="sub2api-datamanagementd.service" function print_help() { cat <<'EOF' 用法: - install-backupd.sh [--binary ] [--source <仓库路径>] + install-datamanagementd.sh [--binary ] [--source <仓库路径>] 参数: - --binary 指定已构建的 backupd 二进制路径 + --binary 指定已构建的 datamanagementd 二进制路径 --source 指定 sub2api 仓库路径(脚本会执行 go build) -h, --help 显示帮助 示例: - sudo ./install-backupd.sh --binary ./backup/backupd - sudo ./install-backupd.sh --source /opt/sub2api-src + sudo ./install-datamanagementd.sh --binary ./datamanagement/datamanagementd + sudo ./install-datamanagementd.sh --source /opt/sub2api-src EOF } @@ -67,13 +67,13 @@ if [[ "$(id -u)" -ne 0 ]]; then fi if [[ -n "$SOURCE_PATH" ]]; then - if [[ ! -d "$SOURCE_PATH/backup" ]]; then - echo "错误: 无效仓库路径,未找到 $SOURCE_PATH/backup" + if [[ ! -d "$SOURCE_PATH/datamanagement" ]]; then + echo "错误: 无效仓库路径,未找到 $SOURCE_PATH/datamanagement" exit 1 fi - echo "[1/6] 从源码构建 backupd..." 
- (cd "$SOURCE_PATH/backup" && go build -o backupd ./cmd/backupd) - BIN_PATH="$SOURCE_PATH/backup/backupd" + echo "[1/6] 从源码构建 datamanagementd..." + (cd "$SOURCE_PATH/datamanagement" && go build -o datamanagementd ./cmd/datamanagementd) + BIN_PATH="$SOURCE_PATH/datamanagement/datamanagementd" fi if [[ ! -f "$BIN_PATH" ]]; then @@ -88,9 +88,9 @@ else echo "[2/6] 系统用户 sub2api 已存在,跳过创建" fi -echo "[3/6] 安装 backupd 二进制..." +echo "[3/6] 安装 datamanagementd 二进制..." mkdir -p "$INSTALL_DIR" -install -m 0755 "$BIN_PATH" "$INSTALL_DIR/backupd" +install -m 0755 "$BIN_PATH" "$INSTALL_DIR/datamanagementd" echo "[4/6] 准备数据目录..." mkdir -p "$DATA_DIR" @@ -107,17 +107,17 @@ fi echo "[5/6] 安装 systemd 服务..." cp "$SERVICE_TEMPLATE" "/etc/systemd/system/$SERVICE_FILE_NAME" systemctl daemon-reload -systemctl enable --now sub2api-backupd +systemctl enable --now sub2api-datamanagementd echo "[6/6] 完成,当前状态:" -systemctl --no-pager --full status sub2api-backupd || true +systemctl --no-pager --full status sub2api-datamanagementd || true cat <<'EOF' 下一步建议: -1. 查看日志:sudo journalctl -u sub2api-backupd -f +1. 查看日志:sudo journalctl -u sub2api-datamanagementd -f 2. 在 sub2api(容器部署时)挂载 socket: - /tmp/sub2api-backup.sock:/tmp/sub2api-backup.sock + /tmp/sub2api-datamanagement.sock:/tmp/sub2api-datamanagement.sock 3. 
进入管理后台“数据管理”页面确认 agent=enabled EOF diff --git a/deploy/sub2api-backupd.service b/deploy/sub2api-datamanagementd.service similarity index 57% rename from deploy/sub2api-backupd.service rename to deploy/sub2api-datamanagementd.service index 52d91f2da..b32733b7a 100644 --- a/deploy/sub2api-backupd.service +++ b/deploy/sub2api-datamanagementd.service @@ -1,5 +1,5 @@ [Unit] -Description=Sub2API Backup Daemon +Description=Sub2API Data Management Daemon After=network.target Wants=network.target @@ -8,9 +8,9 @@ Type=simple User=sub2api Group=sub2api WorkingDirectory=/opt/sub2api -ExecStart=/opt/sub2api/backupd \ - -socket-path /tmp/sub2api-backup.sock \ - -sqlite-path /var/lib/sub2api/backup/backupd.db \ +ExecStart=/opt/sub2api/datamanagementd \ + -socket-path /tmp/sub2api-datamanagement.sock \ + -sqlite-path /var/lib/sub2api/datamanagement/datamanagementd.db \ -version 1.0.0 Restart=always RestartSec=5s diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 90fc3374b..8620ac7d4 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -842,18 +842,20 @@ export default { dataManagement: { title: 'Data Management', - description: 'Manage backup agent status, object storage settings, and backup jobs in one place', + description: 'Manage data management agent status, object storage settings, and backup jobs in one place', agent: { - title: 'Backup Agent Status', + title: 'Data Management Agent Status', description: 'The system probes a fixed Unix socket and enables data management only when reachable.', - enabled: 'Backup agent is ready. Data management operations are available.', - disabled: 'Backup agent is unavailable. Only diagnostic information is available now.', + enabled: 'Data management agent is ready. Data management operations are available.', + disabled: 'Data management agent is unavailable. 
Only diagnostic information is available now.', socketPath: 'Socket Path', version: 'Version', status: 'Status', uptime: 'Uptime', reasonLabel: 'Unavailable Reason', reason: { + DATA_MANAGEMENT_AGENT_SOCKET_MISSING: 'Data management socket file is missing', + DATA_MANAGEMENT_AGENT_UNAVAILABLE: 'Data management agent is unreachable', BACKUP_AGENT_SOCKET_MISSING: 'Backup socket file is missing', BACKUP_AGENT_UNAVAILABLE: 'Backup agent is unreachable', UNKNOWN: 'Unknown reason' @@ -982,7 +984,7 @@ export default { }, actions: { refresh: 'Refresh Status', - disabledHint: 'Start backupd first and ensure the socket is reachable.', + disabledHint: 'Start datamanagementd first and ensure the socket is reachable.', reloadConfig: 'Reload Config', reloadSourceProfiles: 'Reload Source Profiles', reloadProfiles: 'Reload Profiles', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index dba7a876d..94b350424 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -865,18 +865,20 @@ export default { dataManagement: { title: '数据管理', - description: '统一管理备份代理状态、对象存储配置和备份任务', + description: '统一管理数据管理代理状态、对象存储配置和备份任务', agent: { - title: '备份代理状态', + title: '数据管理代理状态', description: '系统会自动探测固定 Unix Socket,仅在可连通时启用数据管理功能。', - enabled: '备份代理已就绪,可继续进行数据管理操作。', - disabled: '备份代理不可用,当前仅可查看诊断信息。', + enabled: '数据管理代理已就绪,可继续进行数据管理操作。', + disabled: '数据管理代理不可用,当前仅可查看诊断信息。', socketPath: 'Socket 路径', version: '版本', status: '状态', uptime: '运行时长', reasonLabel: '不可用原因', reason: { + DATA_MANAGEMENT_AGENT_SOCKET_MISSING: '未检测到数据管理 Socket 文件', + DATA_MANAGEMENT_AGENT_UNAVAILABLE: '数据管理代理不可连通', BACKUP_AGENT_SOCKET_MISSING: '未检测到备份 Socket 文件', BACKUP_AGENT_UNAVAILABLE: '备份代理不可连通', UNKNOWN: '未知原因' @@ -1005,7 +1007,7 @@ export default { }, actions: { refresh: '刷新状态', - disabledHint: '请先启动 backupd 并确认 Socket 可连通。', + disabledHint: '请先启动 datamanagementd 并确认 Socket 可连通。', reloadConfig: '加载配置', reloadSourceProfiles: '刷新数据源配置', reloadProfiles: '刷新账号列表', diff 
--git a/frontend/src/views/admin/DataManagementView.vue b/frontend/src/views/admin/DataManagementView.vue index 5fba5973a..468202701 100644 --- a/frontend/src/views/admin/DataManagementView.vue +++ b/frontend/src/views/admin/DataManagementView.vue @@ -755,8 +755,8 @@ const sourceDrawerOpen = ref(false) const health = ref({ enabled: false, - reason: 'BACKUP_AGENT_SOCKET_MISSING', - socket_path: '/tmp/sub2api-backup.sock' + reason: 'DATA_MANAGEMENT_AGENT_SOCKET_MISSING', + socket_path: '/tmp/sub2api-datamanagement.sock' }) const config = ref(newDefaultConfig()) @@ -826,6 +826,9 @@ const reasonMessage = computed(() => { } const reasonKeyMap: Record = { + DATA_MANAGEMENT_AGENT_SOCKET_MISSING: 'admin.dataManagement.agent.reason.DATA_MANAGEMENT_AGENT_SOCKET_MISSING', + DATA_MANAGEMENT_AGENT_UNAVAILABLE: 'admin.dataManagement.agent.reason.DATA_MANAGEMENT_AGENT_UNAVAILABLE', + // 向后兼容旧 reason code BACKUP_AGENT_SOCKET_MISSING: 'admin.dataManagement.agent.reason.BACKUP_AGENT_SOCKET_MISSING', BACKUP_AGENT_UNAVAILABLE: 'admin.dataManagement.agent.reason.BACKUP_AGENT_UNAVAILABLE' } @@ -1389,7 +1392,7 @@ function syncProfileFormWithSelection() { function newDefaultConfig(): DataManagementConfig { return { source_mode: 'direct', - backup_root: '/var/lib/sub2api/backups', + backup_root: '/var/lib/sub2api/datamanagement', retention_days: 7, keep_last: 30, active_postgres_profile_id: '', From ec6092a86f95f1ef2fe7432e3440f8424cdc08ef Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Wed, 25 Feb 2026 16:09:11 +0800 Subject: [PATCH 006/120] =?UTF-8?q?chore:=20=E6=9B=B4=E6=96=B0=E7=89=88?= =?UTF-8?q?=E6=9C=AC=E5=8F=B7=E8=87=B3=200.1.85.1?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index c0c68bab1..e1f56a4f6 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ 
-0.1.85 +0.1.85.1 From 14bc98a9f6b8fd1c3e13472542ddf72a9b850fc1 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Wed, 25 Feb 2026 21:27:30 +0800 Subject: [PATCH 007/120] =?UTF-8?q?feat(gateway):=20=E5=BC=BA=E5=8C=96=20O?= =?UTF-8?q?penAI=20WS=20=E4=B8=BB=E9=93=BE=E8=B7=AF=E5=B9=B6=E7=A7=BB?= =?UTF-8?q?=E9=99=A4=20HTTP=20=E5=9B=9E=E9=80=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/config/config.go | 83 +- backend/internal/config/config_test.go | 49 +- backend/internal/handler/auth_handler.go | 5 +- backend/internal/service/auth_service.go | 11 + .../auth_service_turnstile_register_test.go | 96 ++ .../service/openai_gateway_service.go | 377 +++++- .../service/openai_oauth_passthrough_test.go | 2 +- .../internal/service/openai_tool_corrector.go | 60 +- .../service/openai_tool_corrector_test.go | 9 + backend/internal/service/openai_ws_client.go | 175 ++- .../internal/service/openai_ws_client_test.go | 96 ++ .../service/openai_ws_fallback_test.go | 89 +- .../internal/service/openai_ws_forwarder.go | 1056 ++++++++++++++++- .../openai_ws_forwarder_benchmark_test.go | 77 ++ .../openai_ws_forwarder_retry_payload_test.go | 50 + .../openai_ws_forwarder_success_test.go | 451 ++++++- backend/internal/service/openai_ws_pool.go | 417 +++++-- .../service/openai_ws_pool_benchmark_test.go | 58 + .../internal/service/openai_ws_pool_test.go | 293 ++++- .../openai_ws_protocol_forward_test.go | 451 ++++++- .../internal/service/openai_ws_state_store.go | 134 ++- .../service/openai_ws_state_store_test.go | 53 + .../internal/service/ops_upstream_context.go | 1 + deploy/.env.example | 10 +- deploy/config.example.yaml | 38 +- tools/perf/openai_ws_v2_perf_suite_k6.js | 216 ++++ 26 files changed, 4072 insertions(+), 285 deletions(-) create mode 100644 backend/internal/service/auth_service_turnstile_register_test.go create mode 100644 backend/internal/service/openai_ws_client_test.go create mode 100644 
backend/internal/service/openai_ws_forwarder_benchmark_test.go create mode 100644 backend/internal/service/openai_ws_forwarder_retry_payload_test.go create mode 100644 backend/internal/service/openai_ws_pool_benchmark_test.go create mode 100644 tools/perf/openai_ws_v2_perf_suite_k6.js diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 468059721..4b5c1729b 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -364,7 +364,7 @@ type GatewayConfig struct { // OpenAIPassthroughAllowTimeoutHeaders: OpenAI 透传模式是否放行客户端超时头 // 关闭(默认)可避免 x-stainless-timeout 等头导致上游提前断流。 OpenAIPassthroughAllowTimeoutHeaders bool `mapstructure:"openai_passthrough_allow_timeout_headers"` - // OpenAIWS: OpenAI Responses WebSocket 配置(默认关闭,不影响现有 HTTP 线路) + // OpenAIWS: OpenAI Responses WebSocket 配置(默认开启,可按需回滚到 HTTP) OpenAIWS GatewayOpenAIWSConfig `mapstructure:"openai_ws"` // HTTP 上游连接池配置(性能优化:支持高并发场景调优) @@ -453,9 +453,9 @@ type GatewayConfig struct { } // GatewayOpenAIWSConfig OpenAI Responses WebSocket 配置。 -// 注意:默认全局关闭,确保现网仍走现有 HTTP/SSE 链路。 +// 注意:默认全局开启;如需回滚可使用 force_http 或关闭 enabled。 type GatewayOpenAIWSConfig struct { - // Enabled: 全局总开关(默认 false) + // Enabled: 全局总开关(默认 true) Enabled bool `mapstructure:"enabled"` // OAuthEnabled: 是否允许 OpenAI OAuth 账号使用 WS OAuthEnabled bool `mapstructure:"oauth_enabled"` @@ -465,6 +465,14 @@ type GatewayOpenAIWSConfig struct { ForceHTTP bool `mapstructure:"force_http"` // AllowStoreRecovery: 允许在 WSv2 下按策略恢复 store=true(默认 false) AllowStoreRecovery bool `mapstructure:"allow_store_recovery"` + // StoreDisabledConnMode: store=false 且无可复用会话连接时的建连策略(strict/adaptive/off) + // - strict: 强制新建连接(隔离优先) + // - adaptive: 仅在高风险失败后强制新建连接(性能与隔离折中) + // - off: 不强制新建连接(复用优先) + StoreDisabledConnMode string `mapstructure:"store_disabled_conn_mode"` + // StoreDisabledForceNewConn: store=false 且无可复用粘连连接时是否强制新建连接(默认 true,保障会话隔离) + // 兼容旧配置;当 StoreDisabledConnMode 为空时才生效。 + StoreDisabledForceNewConn bool 
`mapstructure:"store_disabled_force_new_conn"` // PrewarmGenerateEnabled: 是否启用 WSv2 generate=false 预热(默认 false) PrewarmGenerateEnabled bool `mapstructure:"prewarm_generate_enabled"` @@ -487,8 +495,24 @@ type GatewayOpenAIWSConfig struct { WriteTimeoutSeconds int `mapstructure:"write_timeout_seconds"` PoolTargetUtilization float64 `mapstructure:"pool_target_utilization"` QueueLimitPerConn int `mapstructure:"queue_limit_per_conn"` + // EventFlushBatchSize: WS 流式写出批量 flush 阈值(事件条数) + EventFlushBatchSize int `mapstructure:"event_flush_batch_size"` + // EventFlushIntervalMS: WS 流式写出最大等待时间(毫秒);0 表示仅按 batch 触发 + EventFlushIntervalMS int `mapstructure:"event_flush_interval_ms"` + // PrewarmCooldownMS: 连接池预热触发冷却时间(毫秒) + PrewarmCooldownMS int `mapstructure:"prewarm_cooldown_ms"` // FallbackCooldownSeconds: WS 回退冷却窗口,避免 WS/HTTP 抖动;0 表示关闭冷却 FallbackCooldownSeconds int `mapstructure:"fallback_cooldown_seconds"` + // RetryBackoffInitialMS: WS 重试初始退避(毫秒);<=0 表示关闭退避 + RetryBackoffInitialMS int `mapstructure:"retry_backoff_initial_ms"` + // RetryBackoffMaxMS: WS 重试最大退避(毫秒) + RetryBackoffMaxMS int `mapstructure:"retry_backoff_max_ms"` + // RetryJitterRatio: WS 重试退避抖动比例(0-1) + RetryJitterRatio float64 `mapstructure:"retry_jitter_ratio"` + // RetryTotalBudgetMS: WS 单次请求重试总预算(毫秒);0 表示关闭预算限制 + RetryTotalBudgetMS int `mapstructure:"retry_total_budget_ms"` + // PayloadLogSampleRate: payload_schema 日志采样率(0-1) + PayloadLogSampleRate float64 `mapstructure:"payload_log_sample_rate"` // 账号调度与粘连参数 LBTopK int `mapstructure:"lb_top_k"` @@ -1012,7 +1036,7 @@ func setDefaults() { viper.SetDefault("server.read_header_timeout", 30) // 30秒读取请求头 viper.SetDefault("server.idle_timeout", 120) // 120秒空闲超时 viper.SetDefault("server.trusted_proxies", []string{}) - viper.SetDefault("server.max_request_body_size", int64(100*1024*1024)) + viper.SetDefault("server.max_request_body_size", int64(256*1024*1024)) // H2C 默认配置 viper.SetDefault("server.h2c.enabled", false) 
viper.SetDefault("server.h2c.max_concurrent_streams", uint32(50)) // 50 个并发流 @@ -1224,12 +1248,14 @@ func setDefaults() { viper.SetDefault("gateway.max_account_switches_gemini", 3) viper.SetDefault("gateway.force_codex_cli", false) viper.SetDefault("gateway.openai_passthrough_allow_timeout_headers", false) - // OpenAI Responses WebSocket(默认关闭,不影响现网 HTTP 线路) - viper.SetDefault("gateway.openai_ws.enabled", false) + // OpenAI Responses WebSocket(默认开启;可通过 force_http 紧急回滚) + viper.SetDefault("gateway.openai_ws.enabled", true) viper.SetDefault("gateway.openai_ws.oauth_enabled", true) viper.SetDefault("gateway.openai_ws.apikey_enabled", true) viper.SetDefault("gateway.openai_ws.force_http", false) viper.SetDefault("gateway.openai_ws.allow_store_recovery", false) + viper.SetDefault("gateway.openai_ws.store_disabled_conn_mode", "strict") + viper.SetDefault("gateway.openai_ws.store_disabled_force_new_conn", true) viper.SetDefault("gateway.openai_ws.prewarm_generate_enabled", false) viper.SetDefault("gateway.openai_ws.responses_websockets", false) viper.SetDefault("gateway.openai_ws.responses_websockets_v2", true) @@ -1244,7 +1270,15 @@ func setDefaults() { viper.SetDefault("gateway.openai_ws.write_timeout_seconds", 120) viper.SetDefault("gateway.openai_ws.pool_target_utilization", 0.7) viper.SetDefault("gateway.openai_ws.queue_limit_per_conn", 256) + viper.SetDefault("gateway.openai_ws.event_flush_batch_size", 4) + viper.SetDefault("gateway.openai_ws.event_flush_interval_ms", 25) + viper.SetDefault("gateway.openai_ws.prewarm_cooldown_ms", 300) viper.SetDefault("gateway.openai_ws.fallback_cooldown_seconds", 30) + viper.SetDefault("gateway.openai_ws.retry_backoff_initial_ms", 120) + viper.SetDefault("gateway.openai_ws.retry_backoff_max_ms", 2000) + viper.SetDefault("gateway.openai_ws.retry_jitter_ratio", 0.2) + viper.SetDefault("gateway.openai_ws.retry_total_budget_ms", 0) + viper.SetDefault("gateway.openai_ws.payload_log_sample_rate", 0.2) 
viper.SetDefault("gateway.openai_ws.lb_top_k", 3) viper.SetDefault("gateway.openai_ws.sticky_session_ttl_seconds", 3600) viper.SetDefault("gateway.openai_ws.sticky_response_id_ttl_seconds", 3600) @@ -1256,7 +1290,7 @@ func setDefaults() { viper.SetDefault("gateway.openai_ws.scheduler_score_weights.ttft", 0.5) viper.SetDefault("gateway.antigravity_fallback_cooldown_minutes", 1) viper.SetDefault("gateway.antigravity_extra_retries", 10) - viper.SetDefault("gateway.max_body_size", int64(100*1024*1024)) + viper.SetDefault("gateway.max_body_size", int64(256*1024*1024)) viper.SetDefault("gateway.upstream_response_read_max_bytes", int64(8*1024*1024)) viper.SetDefault("gateway.proxy_probe_response_read_max_bytes", int64(1024*1024)) viper.SetDefault("gateway.gemini_debug_response_headers", false) @@ -1884,9 +1918,44 @@ func (c *Config) Validate() error { if c.Gateway.OpenAIWS.QueueLimitPerConn <= 0 { return fmt.Errorf("gateway.openai_ws.queue_limit_per_conn must be positive") } + if c.Gateway.OpenAIWS.EventFlushBatchSize <= 0 { + return fmt.Errorf("gateway.openai_ws.event_flush_batch_size must be positive") + } + if c.Gateway.OpenAIWS.EventFlushIntervalMS < 0 { + return fmt.Errorf("gateway.openai_ws.event_flush_interval_ms must be non-negative") + } + if c.Gateway.OpenAIWS.PrewarmCooldownMS < 0 { + return fmt.Errorf("gateway.openai_ws.prewarm_cooldown_ms must be non-negative") + } if c.Gateway.OpenAIWS.FallbackCooldownSeconds < 0 { return fmt.Errorf("gateway.openai_ws.fallback_cooldown_seconds must be non-negative") } + if c.Gateway.OpenAIWS.RetryBackoffInitialMS < 0 { + return fmt.Errorf("gateway.openai_ws.retry_backoff_initial_ms must be non-negative") + } + if c.Gateway.OpenAIWS.RetryBackoffMaxMS < 0 { + return fmt.Errorf("gateway.openai_ws.retry_backoff_max_ms must be non-negative") + } + if c.Gateway.OpenAIWS.RetryBackoffInitialMS > 0 && c.Gateway.OpenAIWS.RetryBackoffMaxMS > 0 && + c.Gateway.OpenAIWS.RetryBackoffMaxMS < c.Gateway.OpenAIWS.RetryBackoffInitialMS { + 
return fmt.Errorf("gateway.openai_ws.retry_backoff_max_ms must be >= retry_backoff_initial_ms") + } + if c.Gateway.OpenAIWS.RetryJitterRatio < 0 || c.Gateway.OpenAIWS.RetryJitterRatio > 1 { + return fmt.Errorf("gateway.openai_ws.retry_jitter_ratio must be within [0,1]") + } + if c.Gateway.OpenAIWS.RetryTotalBudgetMS < 0 { + return fmt.Errorf("gateway.openai_ws.retry_total_budget_ms must be non-negative") + } + if mode := strings.ToLower(strings.TrimSpace(c.Gateway.OpenAIWS.StoreDisabledConnMode)); mode != "" { + switch mode { + case "strict", "adaptive", "off": + default: + return fmt.Errorf("gateway.openai_ws.store_disabled_conn_mode must be one of strict|adaptive|off") + } + } + if c.Gateway.OpenAIWS.PayloadLogSampleRate < 0 || c.Gateway.OpenAIWS.PayloadLogSampleRate > 1 { + return fmt.Errorf("gateway.openai_ws.payload_log_sample_rate must be within [0,1]") + } if c.Gateway.OpenAIWS.LBTopK <= 0 { return fmt.Errorf("gateway.openai_ws.lb_top_k must be positive") } diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go index 77edbd024..e3db2b063 100644 --- a/backend/internal/config/config_test.go +++ b/backend/internal/config/config_test.go @@ -84,8 +84,8 @@ func TestLoadDefaultOpenAIWSConfig(t *testing.T) { t.Fatalf("Load() error: %v", err) } - if cfg.Gateway.OpenAIWS.Enabled { - t.Fatalf("Gateway.OpenAIWS.Enabled = true, want false") + if !cfg.Gateway.OpenAIWS.Enabled { + t.Fatalf("Gateway.OpenAIWS.Enabled = false, want true") } if !cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 { t.Fatalf("Gateway.OpenAIWS.ResponsesWebsocketsV2 = false, want true") @@ -111,6 +111,36 @@ func TestLoadDefaultOpenAIWSConfig(t *testing.T) { if cfg.Gateway.OpenAIWS.FallbackCooldownSeconds != 30 { t.Fatalf("Gateway.OpenAIWS.FallbackCooldownSeconds = %d, want 30", cfg.Gateway.OpenAIWS.FallbackCooldownSeconds) } + if cfg.Gateway.OpenAIWS.EventFlushBatchSize != 4 { + t.Fatalf("Gateway.OpenAIWS.EventFlushBatchSize = %d, want 4", 
cfg.Gateway.OpenAIWS.EventFlushBatchSize) + } + if cfg.Gateway.OpenAIWS.EventFlushIntervalMS != 25 { + t.Fatalf("Gateway.OpenAIWS.EventFlushIntervalMS = %d, want 25", cfg.Gateway.OpenAIWS.EventFlushIntervalMS) + } + if cfg.Gateway.OpenAIWS.PrewarmCooldownMS != 300 { + t.Fatalf("Gateway.OpenAIWS.PrewarmCooldownMS = %d, want 300", cfg.Gateway.OpenAIWS.PrewarmCooldownMS) + } + if cfg.Gateway.OpenAIWS.RetryBackoffInitialMS != 120 { + t.Fatalf("Gateway.OpenAIWS.RetryBackoffInitialMS = %d, want 120", cfg.Gateway.OpenAIWS.RetryBackoffInitialMS) + } + if cfg.Gateway.OpenAIWS.RetryBackoffMaxMS != 2000 { + t.Fatalf("Gateway.OpenAIWS.RetryBackoffMaxMS = %d, want 2000", cfg.Gateway.OpenAIWS.RetryBackoffMaxMS) + } + if cfg.Gateway.OpenAIWS.RetryJitterRatio != 0.2 { + t.Fatalf("Gateway.OpenAIWS.RetryJitterRatio = %v, want 0.2", cfg.Gateway.OpenAIWS.RetryJitterRatio) + } + if cfg.Gateway.OpenAIWS.RetryTotalBudgetMS != 0 { + t.Fatalf("Gateway.OpenAIWS.RetryTotalBudgetMS = %d, want 0", cfg.Gateway.OpenAIWS.RetryTotalBudgetMS) + } + if cfg.Gateway.OpenAIWS.PayloadLogSampleRate != 0.2 { + t.Fatalf("Gateway.OpenAIWS.PayloadLogSampleRate = %v, want 0.2", cfg.Gateway.OpenAIWS.PayloadLogSampleRate) + } + if !cfg.Gateway.OpenAIWS.StoreDisabledForceNewConn { + t.Fatalf("Gateway.OpenAIWS.StoreDisabledForceNewConn = false, want true") + } + if cfg.Gateway.OpenAIWS.StoreDisabledConnMode != "strict" { + t.Fatalf("Gateway.OpenAIWS.StoreDisabledConnMode = %q, want %q", cfg.Gateway.OpenAIWS.StoreDisabledConnMode, "strict") + } } func TestLoadOpenAIWSStickyTTLCompatibility(t *testing.T) { @@ -1321,6 +1351,21 @@ func TestValidateConfig_OpenAIWSRules(t *testing.T) { mutate: func(c *Config) { c.Gateway.OpenAIWS.FallbackCooldownSeconds = -1 }, wantErr: "gateway.openai_ws.fallback_cooldown_seconds", }, + { + name: "store_disabled_conn_mode 必须为 strict|adaptive|off", + mutate: func(c *Config) { c.Gateway.OpenAIWS.StoreDisabledConnMode = "invalid" }, + wantErr: 
"gateway.openai_ws.store_disabled_conn_mode", + }, + { + name: "payload_log_sample_rate 必须在 [0,1] 范围内", + mutate: func(c *Config) { c.Gateway.OpenAIWS.PayloadLogSampleRate = 1.2 }, + wantErr: "gateway.openai_ws.payload_log_sample_rate", + }, + { + name: "retry_total_budget_ms 不能为负数", + mutate: func(c *Config) { c.Gateway.OpenAIWS.RetryTotalBudgetMS = -1 }, + wantErr: "gateway.openai_ws.retry_total_budget_ms", + }, { name: "lb_top_k 必须为正数", mutate: func(c *Config) { c.Gateway.OpenAIWS.LBTopK = 0 }, diff --git a/backend/internal/handler/auth_handler.go b/backend/internal/handler/auth_handler.go index e0078e147..1ffa9d717 100644 --- a/backend/internal/handler/auth_handler.go +++ b/backend/internal/handler/auth_handler.go @@ -113,9 +113,8 @@ func (h *AuthHandler) Register(c *gin.Context) { return } - // Turnstile 验证 — 始终执行,防止绕过 - // TODO: 确认前端在提交邮箱验证码注册时也传递了 turnstile_token - if err := h.authService.VerifyTurnstile(c.Request.Context(), req.TurnstileToken, ip.GetClientIP(c)); err != nil { + // Turnstile 验证(邮箱验证码注册场景避免重复校验一次性 token) + if err := h.authService.VerifyTurnstileForRegister(c.Request.Context(), req.TurnstileToken, ip.GetClientIP(c), req.VerifyCode); err != nil { response.ErrorFrom(c, err) return } diff --git a/backend/internal/service/auth_service.go b/backend/internal/service/auth_service.go index 73f59dd09..eae7bd539 100644 --- a/backend/internal/service/auth_service.go +++ b/backend/internal/service/auth_service.go @@ -308,6 +308,17 @@ func (s *AuthService) SendVerifyCodeAsync(ctx context.Context, email string) (*S }, nil } +// VerifyTurnstileForRegister 在注册场景下验证 Turnstile。 +// 当邮箱验证开启且已提交验证码时,说明验证码发送阶段已完成 Turnstile 校验, +// 此处跳过二次校验,避免一次性 token 在注册提交时重复使用导致误报失败。 +func (s *AuthService) VerifyTurnstileForRegister(ctx context.Context, token, remoteIP, verifyCode string) error { + if s.IsEmailVerifyEnabled(ctx) && strings.TrimSpace(verifyCode) != "" { + logger.LegacyPrintf("service.auth", "%s", "[Auth] Email verify flow detected, skip duplicate Turnstile check 
on register") + return nil + } + return s.VerifyTurnstile(ctx, token, remoteIP) +} + // VerifyTurnstile 验证Turnstile token func (s *AuthService) VerifyTurnstile(ctx context.Context, token string, remoteIP string) error { required := s.cfg != nil && s.cfg.Server.Mode == "release" && s.cfg.Turnstile.Required diff --git a/backend/internal/service/auth_service_turnstile_register_test.go b/backend/internal/service/auth_service_turnstile_register_test.go new file mode 100644 index 000000000..7dd9edca8 --- /dev/null +++ b/backend/internal/service/auth_service_turnstile_register_test.go @@ -0,0 +1,96 @@ +//go:build unit + +package service + +import ( + "context" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +type turnstileVerifierSpy struct { + called int + lastToken string + result *TurnstileVerifyResponse + err error +} + +func (s *turnstileVerifierSpy) VerifyToken(_ context.Context, _ string, token, _ string) (*TurnstileVerifyResponse, error) { + s.called++ + s.lastToken = token + if s.err != nil { + return nil, s.err + } + if s.result != nil { + return s.result, nil + } + return &TurnstileVerifyResponse{Success: true}, nil +} + +func newAuthServiceForRegisterTurnstileTest(settings map[string]string, verifier TurnstileVerifier) *AuthService { + cfg := &config.Config{ + Server: config.ServerConfig{ + Mode: "release", + }, + Turnstile: config.TurnstileConfig{ + Required: true, + }, + } + + settingService := NewSettingService(&settingRepoStub{values: settings}, cfg) + turnstileService := NewTurnstileService(settingService, verifier) + + return NewAuthService( + &userRepoStub{}, + nil, // redeemRepo + nil, // refreshTokenCache + cfg, + settingService, + nil, // emailService + turnstileService, + nil, // emailQueueService + nil, // promoService + ) +} + +func TestAuthService_VerifyTurnstileForRegister_SkipWhenEmailVerifyCodeProvided(t *testing.T) { + verifier := &turnstileVerifierSpy{} + service := 
newAuthServiceForRegisterTurnstileTest(map[string]string{ + SettingKeyEmailVerifyEnabled: "true", + SettingKeyTurnstileEnabled: "true", + SettingKeyTurnstileSecretKey: "secret", + SettingKeyRegistrationEnabled: "true", + }, verifier) + + err := service.VerifyTurnstileForRegister(context.Background(), "", "127.0.0.1", "123456") + require.NoError(t, err) + require.Equal(t, 0, verifier.called) +} + +func TestAuthService_VerifyTurnstileForRegister_RequireWhenVerifyCodeMissing(t *testing.T) { + verifier := &turnstileVerifierSpy{} + service := newAuthServiceForRegisterTurnstileTest(map[string]string{ + SettingKeyEmailVerifyEnabled: "true", + SettingKeyTurnstileEnabled: "true", + SettingKeyTurnstileSecretKey: "secret", + }, verifier) + + err := service.VerifyTurnstileForRegister(context.Background(), "", "127.0.0.1", "") + require.ErrorIs(t, err, ErrTurnstileVerificationFailed) +} + +func TestAuthService_VerifyTurnstileForRegister_NoSkipWhenEmailVerifyDisabled(t *testing.T) { + verifier := &turnstileVerifierSpy{} + service := newAuthServiceForRegisterTurnstileTest(map[string]string{ + SettingKeyEmailVerifyEnabled: "false", + SettingKeyTurnstileEnabled: "true", + SettingKeyTurnstileSecretKey: "secret", + }, verifier) + + err := service.VerifyTurnstileForRegister(context.Background(), "turnstile-token", "127.0.0.1", "123456") + require.NoError(t, err) + require.Equal(t, 1, verifier.called) + require.Equal(t, "turnstile-token", verifier.lastToken) +} diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index da818de49..820e0e0d3 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -10,6 +10,7 @@ import ( "errors" "fmt" "io" + "math/rand" "net/http" "sort" "strconv" @@ -35,12 +36,19 @@ const ( // OpenAI Platform API for API Key accounts (fallback) openaiPlatformAPIURL = "https://api.openai.com/v1/responses" openaiStickySessionTTL = time.Hour 
// 粘性会话TTL - codexCLIUserAgent = "codex_cli_rs/0.98.0" + codexCLIUserAgent = "codex_cli_rs/0.104.0" // codex_cli_only 拒绝时单个请求头日志长度上限(字符) codexCLIOnlyHeaderValueMaxBytes = 256 // OpenAIParsedRequestBodyKey 缓存 handler 侧已解析的请求体,避免重复解析。 OpenAIParsedRequestBodyKey = "openai_parsed_request_body" + // OpenAI WS Mode 失败后的重连次数上限(不含首次尝试)。 + // 与 Codex 客户端保持一致:失败后最多重连 5 次。 + openAIWSReconnectRetryLimit = 5 + // OpenAI WS Mode 重连退避默认值(可由配置覆盖)。 + openAIWSRetryBackoffInitialDefault = 120 * time.Millisecond + openAIWSRetryBackoffMaxDefault = 2 * time.Second + openAIWSRetryJitterRatioDefault = 0.2 ) // OpenAI allowed headers whitelist (for non-passthrough). @@ -205,6 +213,20 @@ type OpenAIForwardResult struct { FirstTokenMs *int } +type OpenAIWSRetryMetricsSnapshot struct { + RetryAttemptsTotal int64 `json:"retry_attempts_total"` + RetryBackoffMsTotal int64 `json:"retry_backoff_ms_total"` + RetryExhaustedTotal int64 `json:"retry_exhausted_total"` + NonRetryableFastFallbackTotal int64 `json:"non_retryable_fast_fallback_total"` +} + +type openAIWSRetryMetrics struct { + retryAttempts atomic.Int64 + retryBackoffMs atomic.Int64 + retryExhausted atomic.Int64 + nonRetryableFastFallback atomic.Int64 +} + // OpenAIGatewayService handles OpenAI API gateway operations type OpenAIGatewayService struct { accountRepo AccountRepository @@ -233,6 +255,7 @@ type OpenAIGatewayService struct { openaiWSFallbackMu sync.Mutex openaiWSFallbackUntil map[int64]time.Time + openaiWSRetryMetrics openAIWSRetryMetrics } // NewOpenAIGatewayService creates a new OpenAIGatewayService @@ -252,7 +275,7 @@ func NewOpenAIGatewayService( deferredService *DeferredService, openAITokenProvider *OpenAITokenProvider, ) *OpenAIGatewayService { - return &OpenAIGatewayService{ + svc := &OpenAIGatewayService{ accountRepo: accountRepo, usageLogRepo: usageLogRepo, userRepo: userRepo, @@ -271,6 +294,33 @@ func NewOpenAIGatewayService( toolCorrector: NewCodexToolCorrector(), openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), } + 
svc.logOpenAIWSModeBootstrap() + return svc +} + +func (s *OpenAIGatewayService) logOpenAIWSModeBootstrap() { + if s == nil || s.cfg == nil { + return + } + wsCfg := s.cfg.Gateway.OpenAIWS + logOpenAIWSModeInfo( + "bootstrap enabled=%v oauth_enabled=%v apikey_enabled=%v force_http=%v responses_websockets_v2=%v responses_websockets=%v payload_log_sample_rate=%.3f event_flush_batch_size=%d event_flush_interval_ms=%d prewarm_cooldown_ms=%d retry_backoff_initial_ms=%d retry_backoff_max_ms=%d retry_jitter_ratio=%.3f retry_total_budget_ms=%d ws_read_limit_bytes=%d", + wsCfg.Enabled, + wsCfg.OAuthEnabled, + wsCfg.APIKeyEnabled, + wsCfg.ForceHTTP, + wsCfg.ResponsesWebsocketsV2, + wsCfg.ResponsesWebsockets, + wsCfg.PayloadLogSampleRate, + wsCfg.EventFlushBatchSize, + wsCfg.EventFlushIntervalMS, + wsCfg.PrewarmCooldownMS, + wsCfg.RetryBackoffInitialMS, + wsCfg.RetryBackoffMaxMS, + wsCfg.RetryJitterRatio, + wsCfg.RetryTotalBudgetMS, + openAIWSMessageReadLimitBytes, + ) } func (s *OpenAIGatewayService) getCodexClientRestrictionDetector() CodexClientRestrictionDetector { @@ -295,6 +345,160 @@ func (s *OpenAIGatewayService) getOpenAIWSProtocolResolver() OpenAIWSProtocolRes return NewOpenAIWSProtocolResolver(cfg) } +func classifyOpenAIWSReconnectReason(err error) (string, bool) { + if err == nil { + return "", false + } + var fallbackErr *openAIWSFallbackError + if !errors.As(err, &fallbackErr) || fallbackErr == nil { + return "", false + } + reason := strings.TrimSpace(fallbackErr.Reason) + if reason == "" { + return "", false + } + + baseReason := strings.TrimPrefix(reason, "prewarm_") + + switch baseReason { + case "policy_violation", + "message_too_big", + "upgrade_required", + "ws_unsupported", + "auth_failed", + "previous_response_not_found": + return reason, false + } + + switch baseReason { + case "read_event", + "write_request", + "write", + "acquire_timeout", + "acquire_conn", + "conn_queue_full", + "dial_failed", + "upstream_5xx", + "event_error", + "error_event", + 
"upstream_error_event", + "ws_connection_limit_reached", + "missing_final_response": + return reason, true + default: + return reason, false + } +} + +func (s *OpenAIGatewayService) openAIWSRetryBackoff(attempt int) time.Duration { + if attempt <= 0 { + return 0 + } + + initial := openAIWSRetryBackoffInitialDefault + maxBackoff := openAIWSRetryBackoffMaxDefault + jitterRatio := openAIWSRetryJitterRatioDefault + if s != nil && s.cfg != nil { + wsCfg := s.cfg.Gateway.OpenAIWS + if wsCfg.RetryBackoffInitialMS > 0 { + initial = time.Duration(wsCfg.RetryBackoffInitialMS) * time.Millisecond + } + if wsCfg.RetryBackoffMaxMS > 0 { + maxBackoff = time.Duration(wsCfg.RetryBackoffMaxMS) * time.Millisecond + } + if wsCfg.RetryJitterRatio >= 0 { + jitterRatio = wsCfg.RetryJitterRatio + } + } + if initial <= 0 { + return 0 + } + if maxBackoff <= 0 { + maxBackoff = initial + } + if maxBackoff < initial { + maxBackoff = initial + } + if jitterRatio < 0 { + jitterRatio = 0 + } + if jitterRatio > 1 { + jitterRatio = 1 + } + + shift := attempt - 1 + if shift < 0 { + shift = 0 + } + backoff := initial + if shift > 0 { + backoff = initial * time.Duration(1<<shift) + } + if backoff > maxBackoff { + backoff = maxBackoff + } + if jitterRatio <= 0 { + return backoff + } + jitter := time.Duration(float64(backoff) * jitterRatio) + if jitter <= 0 { + return backoff + } + delta := time.Duration(rand.Int63n(int64(jitter)*2+1)) - jitter + withJitter := backoff + delta + if withJitter < 0 { + return 0 + } + return withJitter +} + +func (s *OpenAIGatewayService) openAIWSRetryTotalBudget() time.Duration { + if s != nil && s.cfg != nil { + ms := s.cfg.Gateway.OpenAIWS.RetryTotalBudgetMS + if ms <= 0 { + return 0 + } + return time.Duration(ms) * time.Millisecond + } + return 0 +} + +func (s *OpenAIGatewayService) recordOpenAIWSRetryAttempt(backoff time.Duration) { + if s == nil { + return + } + s.openaiWSRetryMetrics.retryAttempts.Add(1) + if backoff > 0 { + s.openaiWSRetryMetrics.retryBackoffMs.Add(backoff.Milliseconds()) + 
} +} + +func (s *OpenAIGatewayService) recordOpenAIWSRetryExhausted() { + if s == nil { + return + } + s.openaiWSRetryMetrics.retryExhausted.Add(1) +} + +func (s *OpenAIGatewayService) recordOpenAIWSNonRetryableFastFallback() { + if s == nil { + return + } + s.openaiWSRetryMetrics.nonRetryableFastFallback.Add(1) +} + +func (s *OpenAIGatewayService) SnapshotOpenAIWSRetryMetrics() OpenAIWSRetryMetricsSnapshot { + if s == nil { + return OpenAIWSRetryMetricsSnapshot{} + } + return OpenAIWSRetryMetricsSnapshot{ + RetryAttemptsTotal: s.openaiWSRetryMetrics.retryAttempts.Load(), + RetryBackoffMsTotal: s.openaiWSRetryMetrics.retryBackoffMs.Load(), + RetryExhaustedTotal: s.openaiWSRetryMetrics.retryExhausted.Load(), + NonRetryableFastFallbackTotal: s.openaiWSRetryMetrics.nonRetryableFastFallback.Load(), + } +} + func (s *OpenAIGatewayService) detectCodexClientRestriction(c *gin.Context, account *Account) CodexClientRestrictionDetectionResult { return s.getCodexClientRestrictionDetector().Detect(c, account) } @@ -1046,6 +1250,17 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco c.Set("openai_ws_transport_decision", string(wsDecision.Transport)) c.Set("openai_ws_transport_reason", wsDecision.Reason) } + if wsDecision.Transport == OpenAIUpstreamTransportResponsesWebsocketV2 { + logOpenAIWSModeInfo( + "selected account_id=%d account_type=%s transport=%s reason=%s model=%s stream=%v", + account.ID, + account.Type, + normalizeOpenAIWSLogValue(string(wsDecision.Transport)), + normalizeOpenAIWSLogValue(wsDecision.Reason), + reqModel, + reqStream, + ) + } // 当前仅支持 WSv2;WSv1 命中时直接返回错误,避免出现“配置可开但行为不确定”。 if wsDecision.Transport == OpenAIUpstreamTransportResponsesWebsocket { if c != nil { @@ -1210,23 +1425,28 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco // Capture upstream request body for ops retry of this attempt. 
setOpsUpstreamRequestBody(c, body) - // 命中 WS 时优先走 WebSocket Mode;仅在“未写下游”且可恢复错误时回退 HTTP。 + // 命中 WS 时仅走 WebSocket Mode;不再自动回退 HTTP。 if wsDecision.Transport == OpenAIUpstreamTransportResponsesWebsocketV2 { - if s.isOpenAIWSFallbackCooling(account.ID) { - if c != nil { - c.Set("openai_ws_fallback_cooling", true) - } - appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ - Platform: account.Platform, - AccountID: account.ID, - AccountName: account.Name, - UpstreamStatusCode: 0, - Kind: "ws_cooling", - Message: "openai ws fallback cooling", - }) - s.logOpenAIWSFallback(ctx, account, "fallback_cooling", nil) - } else { - wsResult, wsErr := s.forwardOpenAIWSV2( + _, hasPreviousResponseID := reqBody["previous_response_id"] + logOpenAIWSModeInfo( + "forward_start account_id=%d account_type=%s model=%s stream=%v has_previous_response_id=%v", + account.ID, + account.Type, + mappedModel, + reqStream, + hasPreviousResponseID, + ) + maxAttempts := openAIWSReconnectRetryLimit + 1 + wsAttempts := 0 + var wsResult *OpenAIForwardResult + var wsErr error + wsLastFailureReason := "" + retryBudget := s.openAIWSRetryTotalBudget() + retryStartedAt := time.Now() + wsRetryLoop: + for attempt := 1; attempt <= maxAttempts; attempt++ { + wsAttempts = attempt + wsResult, wsErr = s.forwardOpenAIWSV2( ctx, c, account, @@ -1238,27 +1458,100 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco originalModel, mappedModel, startTime, + attempt, + wsLastFailureReason, ) if wsErr == nil { - s.clearOpenAIWSFallbackCooling(account.ID) - return wsResult, nil + break } - var fallbackErr *openAIWSFallbackError - if errors.As(wsErr, &fallbackErr) && (c == nil || c.Writer == nil || !c.Writer.Written()) { - s.markOpenAIWSFallbackCooling(account.ID, fallbackErr.Reason) - appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ - Platform: account.Platform, - AccountID: account.ID, - AccountName: account.Name, - UpstreamStatusCode: 0, - Kind: "ws_fallback", - Message: fallbackErr.Reason, - 
}) - s.logOpenAIWSFallback(ctx, account, fallbackErr.Reason, fallbackErr.Err) - } else { - return nil, wsErr + if c != nil && c.Writer != nil && c.Writer.Written() { + break + } + + reason, retryable := classifyOpenAIWSReconnectReason(wsErr) + if reason != "" { + wsLastFailureReason = reason } + if retryable && attempt < maxAttempts { + backoff := s.openAIWSRetryBackoff(attempt) + if retryBudget > 0 && time.Since(retryStartedAt)+backoff > retryBudget { + s.recordOpenAIWSRetryExhausted() + logOpenAIWSModeInfo( + "reconnect_budget_exhausted account_id=%d attempts=%d max_retries=%d reason=%s elapsed_ms=%d budget_ms=%d", + account.ID, + attempt, + openAIWSReconnectRetryLimit, + normalizeOpenAIWSLogValue(reason), + time.Since(retryStartedAt).Milliseconds(), + retryBudget.Milliseconds(), + ) + break + } + s.recordOpenAIWSRetryAttempt(backoff) + logOpenAIWSModeInfo( + "reconnect_retry account_id=%d retry=%d max_retries=%d reason=%s backoff_ms=%d", + account.ID, + attempt, + openAIWSReconnectRetryLimit, + normalizeOpenAIWSLogValue(reason), + backoff.Milliseconds(), + ) + if backoff > 0 { + timer := time.NewTimer(backoff) + select { + case <-ctx.Done(): + if !timer.Stop() { + <-timer.C + } + wsErr = wrapOpenAIWSFallback("retry_backoff_canceled", ctx.Err()) + break wsRetryLoop + case <-timer.C: + } + } + continue + } + if retryable { + s.recordOpenAIWSRetryExhausted() + logOpenAIWSModeInfo( + "reconnect_exhausted account_id=%d attempts=%d max_retries=%d reason=%s", + account.ID, + attempt, + openAIWSReconnectRetryLimit, + normalizeOpenAIWSLogValue(reason), + ) + } else if reason != "" { + s.recordOpenAIWSNonRetryableFastFallback() + logOpenAIWSModeInfo( + "reconnect_stop account_id=%d attempt=%d reason=%s", + account.ID, + attempt, + normalizeOpenAIWSLogValue(reason), + ) + } + break + } + if wsErr == nil { + firstTokenMs := int64(0) + hasFirstTokenMs := wsResult != nil && wsResult.FirstTokenMs != nil + if hasFirstTokenMs { + firstTokenMs = int64(*wsResult.FirstTokenMs) + } 
+ requestID := "" + if wsResult != nil { + requestID = strings.TrimSpace(wsResult.RequestID) + } + logOpenAIWSModeInfo( + "forward_succeeded account_id=%d request_id=%s stream=%v has_first_token_ms=%v first_token_ms=%d ws_attempts=%d", + account.ID, + requestID, + reqStream, + hasFirstTokenMs, + firstTokenMs, + wsAttempts, + ) + return wsResult, nil } + return nil, wsErr } // Build upstream request @@ -2473,20 +2766,24 @@ func (s *OpenAIGatewayService) correctToolCallsInResponseBody(body []byte) []byt } func (s *OpenAIGatewayService) parseSSEUsage(data string, usage *OpenAIUsage) { - if usage == nil || data == "" || data == "[DONE]" { + s.parseSSEUsageBytes([]byte(data), usage) +} + +func (s *OpenAIGatewayService) parseSSEUsageBytes(data []byte, usage *OpenAIUsage) { + if usage == nil || len(data) == 0 || bytes.Equal(data, []byte("[DONE]")) { return } // 选择性解析:仅在数据中包含 completed 事件标识时才进入字段提取。 - if !strings.Contains(data, `"response.completed"`) { + if !bytes.Contains(data, []byte(`"response.completed"`)) { return } - if gjson.Get(data, "type").String() != "response.completed" { + if gjson.GetBytes(data, "type").String() != "response.completed" { return } - usage.InputTokens = int(gjson.Get(data, "response.usage.input_tokens").Int()) - usage.OutputTokens = int(gjson.Get(data, "response.usage.output_tokens").Int()) - usage.CacheReadInputTokens = int(gjson.Get(data, "response.usage.input_tokens_details.cached_tokens").Int()) + usage.InputTokens = int(gjson.GetBytes(data, "response.usage.input_tokens").Int()) + usage.OutputTokens = int(gjson.GetBytes(data, "response.usage.output_tokens").Int()) + usage.CacheReadInputTokens = int(gjson.GetBytes(data, "response.usage.input_tokens_details.cached_tokens").Int()) } func (s *OpenAIGatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string) (*OpenAIUsage, error) { diff --git a/backend/internal/service/openai_oauth_passthrough_test.go 
b/backend/internal/service/openai_oauth_passthrough_test.go index 7a996c260..0840d3b15 100644 --- a/backend/internal/service/openai_oauth_passthrough_test.go +++ b/backend/internal/service/openai_oauth_passthrough_test.go @@ -515,7 +515,7 @@ func TestOpenAIGatewayService_OAuthPassthrough_NonCodexUAFallbackToCodexUA(t *te require.NoError(t, err) require.Equal(t, false, gjson.GetBytes(upstream.lastBody, "store").Bool()) require.Equal(t, true, gjson.GetBytes(upstream.lastBody, "stream").Bool()) - require.Equal(t, "codex_cli_rs/0.98.0", upstream.lastReq.Header.Get("User-Agent")) + require.Equal(t, "codex_cli_rs/0.104.0", upstream.lastReq.Header.Get("User-Agent")) } func TestOpenAIGatewayService_CodexCLIOnly_RejectsNonCodexClient(t *testing.T) { diff --git a/backend/internal/service/openai_tool_corrector.go b/backend/internal/service/openai_tool_corrector.go index deec80fa6..aa811e481 100644 --- a/backend/internal/service/openai_tool_corrector.go +++ b/backend/internal/service/openai_tool_corrector.go @@ -1,6 +1,7 @@ package service import ( + "bytes" "encoding/json" "fmt" "sync" @@ -62,14 +63,57 @@ func (c *CodexToolCorrector) CorrectToolCallsInSSEData(data string) (string, boo if data == "" || data == "\n" { return data, false } + correctedBytes, corrected := c.CorrectToolCallsInSSEBytes([]byte(data)) + if !corrected { + return data, false + } + return string(correctedBytes), true +} + +// CorrectToolCallsInSSEBytes 修正 SSE JSON 数据中的工具调用(字节路径)。 +// 返回修正后的数据和是否进行了修正。 +func (c *CodexToolCorrector) CorrectToolCallsInSSEBytes(data []byte) ([]byte, bool) { + if len(bytes.TrimSpace(data)) == 0 { + return data, false + } + if !mayContainToolCallPayload(data) { + return data, false + } // 尝试解析 JSON var payload map[string]any - if err := json.Unmarshal([]byte(data), &payload); err != nil { + if err := json.Unmarshal(data, &payload); err != nil { // 不是有效的 JSON,直接返回原数据 return data, false } + corrected := c.correctToolCallsInPayload(payload) + + if !corrected { + return data, 
false + } + + // 序列化回 JSON + correctedBytes, err := json.Marshal(payload) + if err != nil { + logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Failed to marshal corrected data: %v", err) + return data, false + } + + return correctedBytes, true +} + +func mayContainToolCallPayload(data []byte) bool { + // 快速路径:多数 token / 文本事件不包含工具字段,避免进入 Unmarshal 热路径。 + return bytes.Contains(data, []byte(`"tool_calls"`)) || + bytes.Contains(data, []byte(`"function_call"`)) || + bytes.Contains(data, []byte(`"function":{"name"`)) +} + +func (c *CodexToolCorrector) correctToolCallsInPayload(payload map[string]any) bool { + if len(payload) == 0 { + return false + } corrected := false // 处理 tool_calls 数组 @@ -133,19 +177,7 @@ func (c *CodexToolCorrector) CorrectToolCallsInSSEData(data string) (string, boo } } } - - if !corrected { - return data, false - } - - // 序列化回 JSON - correctedBytes, err := json.Marshal(payload) - if err != nil { - logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Failed to marshal corrected data: %v", err) - return data, false - } - - return string(correctedBytes), true + return corrected } // correctToolCallsArray 修正工具调用数组中的工具名称 diff --git a/backend/internal/service/openai_tool_corrector_test.go b/backend/internal/service/openai_tool_corrector_test.go index ff518ea64..7c83de9e9 100644 --- a/backend/internal/service/openai_tool_corrector_test.go +++ b/backend/internal/service/openai_tool_corrector_test.go @@ -5,6 +5,15 @@ import ( "testing" ) +func TestMayContainToolCallPayload(t *testing.T) { + if mayContainToolCallPayload([]byte(`{"type":"response.output_text.delta","delta":"hello"}`)) { + t.Fatalf("plain text event should not trigger tool-call parsing") + } + if !mayContainToolCallPayload([]byte(`{"tool_calls":[{"function":{"name":"apply_patch"}}]}`)) { + t.Fatalf("tool_calls event should trigger tool-call parsing") + } +} + func TestCorrectToolCallsInSSEData(t *testing.T) { corrector := NewCodexToolCorrector() 
diff --git a/backend/internal/service/openai_ws_client.go b/backend/internal/service/openai_ws_client.go index 1fda7261d..7d2c7dd55 100644 --- a/backend/internal/service/openai_ws_client.go +++ b/backend/internal/service/openai_ws_client.go @@ -7,11 +7,29 @@ import ( "net/http" "net/url" "strings" + "sync" + "sync/atomic" + "time" coderws "github.com/coder/websocket" "github.com/coder/websocket/wsjson" ) +const openAIWSMessageReadLimitBytes int64 = 128 * 1024 * 1024 +const ( + openAIWSProxyTransportMaxIdleConns = 128 + openAIWSProxyTransportMaxIdleConnsPerHost = 64 + openAIWSProxyTransportIdleConnTimeout = 90 * time.Second + openAIWSProxyClientCacheMaxEntries = 256 + openAIWSProxyClientCacheIdleTTL = 15 * time.Minute +) + +type OpenAIWSTransportMetricsSnapshot struct { + ProxyClientCacheHits int64 `json:"proxy_client_cache_hits"` + ProxyClientCacheMisses int64 `json:"proxy_client_cache_misses"` + TransportReuseRatio float64 `json:"transport_reuse_ratio"` +} + // openAIWSClientConn 抽象 WS 客户端连接,便于替换底层实现。 type openAIWSClientConn interface { WriteJSON(ctx context.Context, value any) error @@ -25,11 +43,27 @@ type openAIWSClientDialer interface { Dial(ctx context.Context, wsURL string, headers http.Header, proxyURL string) (openAIWSClientConn, int, http.Header, error) } +type openAIWSTransportMetricsDialer interface { + SnapshotTransportMetrics() OpenAIWSTransportMetricsSnapshot +} + func newDefaultOpenAIWSClientDialer() openAIWSClientDialer { - return &coderOpenAIWSClientDialer{} + return &coderOpenAIWSClientDialer{ + proxyClients: make(map[string]*openAIWSProxyClientEntry), + } +} + +type coderOpenAIWSClientDialer struct { + proxyMu sync.Mutex + proxyClients map[string]*openAIWSProxyClientEntry + proxyHits atomic.Int64 + proxyMisses atomic.Int64 } -type coderOpenAIWSClientDialer struct{} +type openAIWSProxyClientEntry struct { + client *http.Client + lastUsedUnixNano int64 +} func (d *coderOpenAIWSClientDialer) Dial( ctx context.Context, @@ -43,18 +77,15 @@ func (d 
*coderOpenAIWSClientDialer) Dial( } opts := &coderws.DialOptions{ - HTTPHeader: cloneHeader(headers), + HTTPHeader: cloneHeader(headers), + CompressionMode: coderws.CompressionContextTakeover, } if proxy := strings.TrimSpace(proxyURL); proxy != "" { - parsedProxyURL, err := url.Parse(proxy) + proxyClient, err := d.proxyHTTPClient(proxy) if err != nil { - return nil, 0, nil, fmt.Errorf("invalid proxy url: %w", err) - } - opts.HTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyURL(parsedProxyURL), - }, + return nil, 0, nil, err } + opts.HTTPClient = proxyClient } conn, resp, err := coderws.Dial(ctx, targetURL, opts) @@ -67,6 +98,9 @@ func (d *coderOpenAIWSClientDialer) Dial( } return nil, status, respHeaders, err } + // coder/websocket 默认单消息读取上限为 32KB,Codex WS 事件(如 rate_limits/大 delta) + // 可能超过该阈值,需显式提高上限,避免本地 read_fail(message too big)。 + conn.SetReadLimit(openAIWSMessageReadLimitBytes) respHeaders := http.Header(nil) if resp != nil { respHeaders = cloneHeader(resp.Header) @@ -74,6 +108,127 @@ func (d *coderOpenAIWSClientDialer) Dial( return &coderOpenAIWSClientConn{conn: conn}, 0, respHeaders, nil } +func (d *coderOpenAIWSClientDialer) proxyHTTPClient(proxy string) (*http.Client, error) { + if d == nil { + return nil, errors.New("openai ws dialer is nil") + } + normalizedProxy := strings.TrimSpace(proxy) + if normalizedProxy == "" { + return nil, errors.New("proxy url is empty") + } + parsedProxyURL, err := url.Parse(normalizedProxy) + if err != nil { + return nil, fmt.Errorf("invalid proxy url: %w", err) + } + now := time.Now().UnixNano() + + d.proxyMu.Lock() + defer d.proxyMu.Unlock() + if entry, ok := d.proxyClients[normalizedProxy]; ok && entry != nil && entry.client != nil { + entry.lastUsedUnixNano = now + d.proxyHits.Add(1) + return entry.client, nil + } + d.cleanupProxyClientsLocked(now) + transport := &http.Transport{ + Proxy: http.ProxyURL(parsedProxyURL), + MaxIdleConns: openAIWSProxyTransportMaxIdleConns, + MaxIdleConnsPerHost: 
openAIWSProxyTransportMaxIdleConnsPerHost, + IdleConnTimeout: openAIWSProxyTransportIdleConnTimeout, + ForceAttemptHTTP2: true, + } + client := &http.Client{Transport: transport} + d.proxyClients[normalizedProxy] = &openAIWSProxyClientEntry{ + client: client, + lastUsedUnixNano: now, + } + d.ensureProxyClientCapacityLocked() + d.proxyMisses.Add(1) + return client, nil +} + +func (d *coderOpenAIWSClientDialer) cleanupProxyClientsLocked(nowUnixNano int64) { + if d == nil || len(d.proxyClients) == 0 { + return + } + idleTTL := openAIWSProxyClientCacheIdleTTL + if idleTTL <= 0 { + return + } + now := time.Unix(0, nowUnixNano) + for key, entry := range d.proxyClients { + if entry == nil || entry.client == nil { + delete(d.proxyClients, key) + continue + } + lastUsed := time.Unix(0, entry.lastUsedUnixNano) + if now.Sub(lastUsed) > idleTTL { + closeOpenAIWSProxyClient(entry.client) + delete(d.proxyClients, key) + } + } +} + +func (d *coderOpenAIWSClientDialer) ensureProxyClientCapacityLocked() { + if d == nil { + return + } + maxEntries := openAIWSProxyClientCacheMaxEntries + if maxEntries <= 0 { + return + } + for len(d.proxyClients) > maxEntries { + var oldestKey string + var oldestLastUsed int64 + hasOldest := false + for key, entry := range d.proxyClients { + lastUsed := int64(0) + if entry != nil { + lastUsed = entry.lastUsedUnixNano + } + if !hasOldest || lastUsed < oldestLastUsed { + hasOldest = true + oldestKey = key + oldestLastUsed = lastUsed + } + } + if !hasOldest { + return + } + if entry := d.proxyClients[oldestKey]; entry != nil { + closeOpenAIWSProxyClient(entry.client) + } + delete(d.proxyClients, oldestKey) + } +} + +func closeOpenAIWSProxyClient(client *http.Client) { + if client == nil || client.Transport == nil { + return + } + if transport, ok := client.Transport.(*http.Transport); ok && transport != nil { + transport.CloseIdleConnections() + } +} + +func (d *coderOpenAIWSClientDialer) SnapshotTransportMetrics() OpenAIWSTransportMetricsSnapshot { + 
if d == nil { + return OpenAIWSTransportMetricsSnapshot{} + } + hits := d.proxyHits.Load() + misses := d.proxyMisses.Load() + total := hits + misses + reuseRatio := 0.0 + if total > 0 { + reuseRatio = float64(hits) / float64(total) + } + return OpenAIWSTransportMetricsSnapshot{ + ProxyClientCacheHits: hits, + ProxyClientCacheMisses: misses, + TransportReuseRatio: reuseRatio, + } +} + type coderOpenAIWSClientConn struct { conn *coderws.Conn } diff --git a/backend/internal/service/openai_ws_client_test.go b/backend/internal/service/openai_ws_client_test.go new file mode 100644 index 000000000..e78c86284 --- /dev/null +++ b/backend/internal/service/openai_ws_client_test.go @@ -0,0 +1,96 @@ +package service + +import ( + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +func TestCoderOpenAIWSClientDialer_ProxyHTTPClientReuse(t *testing.T) { + dialer := newDefaultOpenAIWSClientDialer() + impl, ok := dialer.(*coderOpenAIWSClientDialer) + require.True(t, ok) + + c1, err := impl.proxyHTTPClient("http://127.0.0.1:8080") + require.NoError(t, err) + c2, err := impl.proxyHTTPClient("http://127.0.0.1:8080") + require.NoError(t, err) + require.Same(t, c1, c2, "同一代理地址应复用同一个 HTTP 客户端") + + c3, err := impl.proxyHTTPClient("http://127.0.0.1:8081") + require.NoError(t, err) + require.NotSame(t, c1, c3, "不同代理地址应分离客户端") +} + +func TestCoderOpenAIWSClientDialer_ProxyHTTPClientInvalidURL(t *testing.T) { + dialer := newDefaultOpenAIWSClientDialer() + impl, ok := dialer.(*coderOpenAIWSClientDialer) + require.True(t, ok) + + _, err := impl.proxyHTTPClient("://bad") + require.Error(t, err) +} + +func TestCoderOpenAIWSClientDialer_TransportMetricsSnapshot(t *testing.T) { + dialer := newDefaultOpenAIWSClientDialer() + impl, ok := dialer.(*coderOpenAIWSClientDialer) + require.True(t, ok) + + _, err := impl.proxyHTTPClient("http://127.0.0.1:18080") + require.NoError(t, err) + _, err = impl.proxyHTTPClient("http://127.0.0.1:18080") + require.NoError(t, err) + _, err = 
impl.proxyHTTPClient("http://127.0.0.1:18081") + require.NoError(t, err) + + snapshot := impl.SnapshotTransportMetrics() + require.Equal(t, int64(1), snapshot.ProxyClientCacheHits) + require.Equal(t, int64(2), snapshot.ProxyClientCacheMisses) + require.InDelta(t, 1.0/3.0, snapshot.TransportReuseRatio, 0.0001) +} + +func TestCoderOpenAIWSClientDialer_ProxyClientCacheCapacity(t *testing.T) { + dialer := newDefaultOpenAIWSClientDialer() + impl, ok := dialer.(*coderOpenAIWSClientDialer) + require.True(t, ok) + + total := openAIWSProxyClientCacheMaxEntries + 32 + for i := 0; i < total; i++ { + _, err := impl.proxyHTTPClient(fmt.Sprintf("http://127.0.0.1:%d", 20000+i)) + require.NoError(t, err) + } + + impl.proxyMu.Lock() + cacheSize := len(impl.proxyClients) + impl.proxyMu.Unlock() + + require.LessOrEqual(t, cacheSize, openAIWSProxyClientCacheMaxEntries, "代理客户端缓存应受容量上限约束") +} + +func TestCoderOpenAIWSClientDialer_ProxyClientCacheIdleTTL(t *testing.T) { + dialer := newDefaultOpenAIWSClientDialer() + impl, ok := dialer.(*coderOpenAIWSClientDialer) + require.True(t, ok) + + oldProxy := "http://127.0.0.1:28080" + _, err := impl.proxyHTTPClient(oldProxy) + require.NoError(t, err) + + impl.proxyMu.Lock() + oldEntry := impl.proxyClients[oldProxy] + require.NotNil(t, oldEntry) + oldEntry.lastUsedUnixNano = time.Now().Add(-openAIWSProxyClientCacheIdleTTL - time.Minute).UnixNano() + impl.proxyMu.Unlock() + + // 触发一次新的代理获取,驱动 TTL 清理。 + _, err = impl.proxyHTTPClient("http://127.0.0.1:28081") + require.NoError(t, err) + + impl.proxyMu.Lock() + _, exists := impl.proxyClients[oldProxy] + impl.proxyMu.Unlock() + + require.False(t, exists, "超过空闲 TTL 的代理客户端应被回收") +} diff --git a/backend/internal/service/openai_ws_fallback_test.go b/backend/internal/service/openai_ws_fallback_test.go index b68e2efa5..0a386aaa5 100644 --- a/backend/internal/service/openai_ws_fallback_test.go +++ b/backend/internal/service/openai_ws_fallback_test.go @@ -7,6 +7,7 @@ import ( "time" 
"github.com/Wei-Shaw/sub2api/internal/config" + coderws "github.com/coder/websocket" "github.com/stretchr/testify/require" ) @@ -31,8 +32,18 @@ func TestClassifyOpenAIWSErrorEvent(t *testing.T) { require.True(t, recoverable) reason, recoverable = classifyOpenAIWSErrorEvent([]byte(`{"type":"error","error":{"code":"previous_response_not_found","message":"not found"}}`)) - require.Equal(t, "event_error", reason) - require.False(t, recoverable) + require.Equal(t, "previous_response_not_found", reason) + require.True(t, recoverable) +} + +func TestClassifyOpenAIWSReconnectReason(t *testing.T) { + reason, retryable := classifyOpenAIWSReconnectReason(wrapOpenAIWSFallback("policy_violation", errors.New("policy"))) + require.Equal(t, "policy_violation", reason) + require.False(t, retryable) + + reason, retryable = classifyOpenAIWSReconnectReason(wrapOpenAIWSFallback("read_event", errors.New("io"))) + require.Equal(t, "read_event", reason) + require.True(t, retryable) } func TestOpenAIWSErrorHTTPStatus(t *testing.T) { @@ -58,3 +69,77 @@ func TestOpenAIWSFallbackCooling(t *testing.T) { time.Sleep(1200 * time.Millisecond) require.False(t, svc.isOpenAIWSFallbackCooling(2)) } + +func TestOpenAIWSRetryBackoff(t *testing.T) { + svc := &OpenAIGatewayService{cfg: &config.Config{}} + svc.cfg.Gateway.OpenAIWS.RetryBackoffInitialMS = 100 + svc.cfg.Gateway.OpenAIWS.RetryBackoffMaxMS = 400 + svc.cfg.Gateway.OpenAIWS.RetryJitterRatio = 0 + + require.Equal(t, time.Duration(100)*time.Millisecond, svc.openAIWSRetryBackoff(1)) + require.Equal(t, time.Duration(200)*time.Millisecond, svc.openAIWSRetryBackoff(2)) + require.Equal(t, time.Duration(400)*time.Millisecond, svc.openAIWSRetryBackoff(3)) + require.Equal(t, time.Duration(400)*time.Millisecond, svc.openAIWSRetryBackoff(4)) +} + +func TestOpenAIWSRetryTotalBudget(t *testing.T) { + svc := &OpenAIGatewayService{cfg: &config.Config{}} + svc.cfg.Gateway.OpenAIWS.RetryTotalBudgetMS = 1200 + require.Equal(t, 1200*time.Millisecond, 
svc.openAIWSRetryTotalBudget()) + + svc.cfg.Gateway.OpenAIWS.RetryTotalBudgetMS = 0 + require.Equal(t, time.Duration(0), svc.openAIWSRetryTotalBudget()) +} + +func TestClassifyOpenAIWSReadFallbackReason(t *testing.T) { + require.Equal(t, "policy_violation", classifyOpenAIWSReadFallbackReason(coderws.CloseError{Code: coderws.StatusPolicyViolation})) + require.Equal(t, "message_too_big", classifyOpenAIWSReadFallbackReason(coderws.CloseError{Code: coderws.StatusMessageTooBig})) + require.Equal(t, "read_event", classifyOpenAIWSReadFallbackReason(errors.New("io"))) +} + +func TestOpenAIWSStoreDisabledConnMode(t *testing.T) { + svc := &OpenAIGatewayService{cfg: &config.Config{}} + svc.cfg.Gateway.OpenAIWS.StoreDisabledForceNewConn = true + require.Equal(t, openAIWSStoreDisabledConnModeStrict, svc.openAIWSStoreDisabledConnMode()) + + svc.cfg.Gateway.OpenAIWS.StoreDisabledConnMode = "adaptive" + require.Equal(t, openAIWSStoreDisabledConnModeAdaptive, svc.openAIWSStoreDisabledConnMode()) + + svc.cfg.Gateway.OpenAIWS.StoreDisabledConnMode = "" + svc.cfg.Gateway.OpenAIWS.StoreDisabledForceNewConn = false + require.Equal(t, openAIWSStoreDisabledConnModeOff, svc.openAIWSStoreDisabledConnMode()) +} + +func TestShouldForceNewConnOnStoreDisabled(t *testing.T) { + require.True(t, shouldForceNewConnOnStoreDisabled(openAIWSStoreDisabledConnModeStrict, "")) + require.False(t, shouldForceNewConnOnStoreDisabled(openAIWSStoreDisabledConnModeOff, "policy_violation")) + + require.True(t, shouldForceNewConnOnStoreDisabled(openAIWSStoreDisabledConnModeAdaptive, "policy_violation")) + require.True(t, shouldForceNewConnOnStoreDisabled(openAIWSStoreDisabledConnModeAdaptive, "prewarm_message_too_big")) + require.False(t, shouldForceNewConnOnStoreDisabled(openAIWSStoreDisabledConnModeAdaptive, "read_event")) +} + +func TestOpenAIWSRetryMetricsSnapshot(t *testing.T) { + svc := &OpenAIGatewayService{} + svc.recordOpenAIWSRetryAttempt(150 * time.Millisecond) + svc.recordOpenAIWSRetryAttempt(0) + 
svc.recordOpenAIWSRetryExhausted() + svc.recordOpenAIWSNonRetryableFastFallback() + + snapshot := svc.SnapshotOpenAIWSRetryMetrics() + require.Equal(t, int64(2), snapshot.RetryAttemptsTotal) + require.Equal(t, int64(150), snapshot.RetryBackoffMsTotal) + require.Equal(t, int64(1), snapshot.RetryExhaustedTotal) + require.Equal(t, int64(1), snapshot.NonRetryableFastFallbackTotal) +} + +func TestShouldLogOpenAIWSPayloadSchema(t *testing.T) { + svc := &OpenAIGatewayService{cfg: &config.Config{}} + + svc.cfg.Gateway.OpenAIWS.PayloadLogSampleRate = 0 + require.True(t, svc.shouldLogOpenAIWSPayloadSchema(1), "首次尝试应始终记录 payload_schema") + require.False(t, svc.shouldLogOpenAIWSPayloadSchema(2)) + + svc.cfg.Gateway.OpenAIWS.PayloadLogSampleRate = 1 + require.True(t, svc.shouldLogOpenAIWSPayloadSchema(2)) +} diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index 8f1c84c95..00d83c839 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -5,14 +5,17 @@ import ( "encoding/json" "errors" "fmt" + "math/rand" "net/http" "net/url" + "sort" "strings" "time" "github.com/Wei-Shaw/sub2api/internal/pkg/logger" "github.com/Wei-Shaw/sub2api/internal/pkg/openai" "github.com/Wei-Shaw/sub2api/internal/util/responseheaders" + coderws "github.com/coder/websocket" "github.com/gin-gonic/gin" "github.com/tidwall/gjson" "github.com/tidwall/sjson" @@ -25,6 +28,28 @@ const ( openAIWSTurnStateHeader = "x-codex-turn-state" openAIWSTurnMetadataHeader = "x-codex-turn-metadata" + + openAIWSLogValueMaxLen = 160 + openAIWSHeaderValueMaxLen = 120 + openAIWSIDValueMaxLen = 64 + openAIWSEventLogHeadLimit = 20 + openAIWSEventLogEveryN = 50 + openAIWSBufferLogHeadLimit = 8 + openAIWSBufferLogEveryN = 20 + openAIWSPrewarmEventLogHead = 10 + openAIWSPayloadKeySizeTopN = 6 + + openAIWSPayloadSizeEstimateDepth = 3 + openAIWSPayloadSizeEstimateMaxBytes = 64 * 1024 + 
openAIWSPayloadSizeEstimateMaxItems = 16 + + openAIWSEventFlushBatchSizeDefault = 4 + openAIWSEventFlushIntervalDefault = 25 * time.Millisecond + openAIWSPayloadLogSampleDefault = 0.2 + + openAIWSStoreDisabledConnModeStrict = "strict" + openAIWSStoreDisabledConnModeAdaptive = "adaptive" + openAIWSStoreDisabledConnModeOff = "off" ) // openAIWSFallbackError 表示可安全回退到 HTTP 的 WS 错误(尚未写下游)。 @@ -54,6 +79,411 @@ func wrapOpenAIWSFallback(reason string, err error) error { return &openAIWSFallbackError{Reason: strings.TrimSpace(reason), Err: err} } +func normalizeOpenAIWSLogValue(value string) string { + trimmed := strings.TrimSpace(value) + if trimmed == "" { + return "-" + } + replacer := strings.NewReplacer( + "error", "err", + "fallback", "fb", + "warning", "warnx", + "failed", "fail", + ) + return replacer.Replace(trimmed) +} + +func truncateOpenAIWSLogValue(value string, maxLen int) string { + normalized := normalizeOpenAIWSLogValue(value) + if normalized == "-" || maxLen <= 0 { + return normalized + } + if len(normalized) <= maxLen { + return normalized + } + return normalized[:maxLen] + "..." 
+} + +func openAIWSHeaderValueForLog(headers http.Header, key string) string { + if headers == nil { + return "-" + } + return truncateOpenAIWSLogValue(headers.Get(key), openAIWSHeaderValueMaxLen) +} + +func hasOpenAIWSHeader(headers http.Header, key string) bool { + if headers == nil { + return false + } + return strings.TrimSpace(headers.Get(key)) != "" +} + +type openAIWSSessionHeaderResolution struct { + SessionID string + ConversationID string + SessionSource string + ConversationSource string +} + +func resolveOpenAIWSSessionHeaders(c *gin.Context, promptCacheKey string) openAIWSSessionHeaderResolution { + resolution := openAIWSSessionHeaderResolution{ + SessionSource: "none", + ConversationSource: "none", + } + if c != nil && c.Request != nil { + if sessionID := strings.TrimSpace(c.Request.Header.Get("session_id")); sessionID != "" { + resolution.SessionID = sessionID + resolution.SessionSource = "header_session_id" + } + if conversationID := strings.TrimSpace(c.Request.Header.Get("conversation_id")); conversationID != "" { + resolution.ConversationID = conversationID + resolution.ConversationSource = "header_conversation_id" + if resolution.SessionID == "" { + resolution.SessionID = conversationID + resolution.SessionSource = "header_conversation_id" + } + } + } + + cacheKey := strings.TrimSpace(promptCacheKey) + if cacheKey != "" { + if resolution.SessionID == "" { + resolution.SessionID = cacheKey + resolution.SessionSource = "prompt_cache_key" + } + } + return resolution +} + +func shouldLogOpenAIWSEvent(idx int, eventType string) bool { + if idx <= openAIWSEventLogHeadLimit { + return true + } + if openAIWSEventLogEveryN > 0 && idx%openAIWSEventLogEveryN == 0 { + return true + } + if eventType == "error" || isOpenAIWSTerminalEvent(eventType) { + return true + } + return false +} + +func shouldLogOpenAIWSBufferedEvent(idx int) bool { + if idx <= openAIWSBufferLogHeadLimit { + return true + } + if openAIWSBufferLogEveryN > 0 && idx%openAIWSBufferLogEveryN 
== 0 { + return true + } + return false +} + +func summarizeOpenAIWSErrorEventFields(message []byte) (code string, errType string, errMessage string) { + if len(message) == 0 { + return "-", "-", "-" + } + code = truncateOpenAIWSLogValue(gjson.GetBytes(message, "error.code").String(), openAIWSLogValueMaxLen) + errType = truncateOpenAIWSLogValue(gjson.GetBytes(message, "error.type").String(), openAIWSLogValueMaxLen) + errMessage = truncateOpenAIWSLogValue(gjson.GetBytes(message, "error.message").String(), openAIWSLogValueMaxLen) + return code, errType, errMessage +} + +func summarizeOpenAIWSPayloadKeySizes(payload map[string]any, topN int) string { + if len(payload) == 0 { + return "-" + } + type keySize struct { + Key string + Size int + } + sizes := make([]keySize, 0, len(payload)) + for key, value := range payload { + size := estimateOpenAIWSPayloadValueSize(value, openAIWSPayloadSizeEstimateDepth) + sizes = append(sizes, keySize{Key: key, Size: size}) + } + sort.Slice(sizes, func(i, j int) bool { + if sizes[i].Size == sizes[j].Size { + return sizes[i].Key < sizes[j].Key + } + return sizes[i].Size > sizes[j].Size + }) + + if topN <= 0 || topN > len(sizes) { + topN = len(sizes) + } + parts := make([]string, 0, topN) + for idx := 0; idx < topN; idx++ { + item := sizes[idx] + parts = append(parts, fmt.Sprintf("%s:%d", item.Key, item.Size)) + } + return strings.Join(parts, ",") +} + +func estimateOpenAIWSPayloadValueSize(value any, depth int) int { + if depth <= 0 { + return -1 + } + switch v := value.(type) { + case nil: + return 0 + case string: + return len(v) + case []byte: + return len(v) + case bool: + return 1 + case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return 8 + case float32, float64: + return 8 + case map[string]any: + if len(v) == 0 { + return 2 + } + total := 2 + count := 0 + for key, item := range v { + count++ + if count > openAIWSPayloadSizeEstimateMaxItems { + return -1 + } + itemSize := 
estimateOpenAIWSPayloadValueSize(item, depth-1) + if itemSize < 0 { + return -1 + } + total += len(key) + itemSize + 3 + if total > openAIWSPayloadSizeEstimateMaxBytes { + return -1 + } + } + return total + case []any: + if len(v) == 0 { + return 2 + } + total := 2 + limit := len(v) + if limit > openAIWSPayloadSizeEstimateMaxItems { + return -1 + } + for i := 0; i < limit; i++ { + itemSize := estimateOpenAIWSPayloadValueSize(v[i], depth-1) + if itemSize < 0 { + return -1 + } + total += itemSize + 1 + if total > openAIWSPayloadSizeEstimateMaxBytes { + return -1 + } + } + return total + default: + raw, err := json.Marshal(v) + if err != nil { + return -1 + } + if len(raw) > openAIWSPayloadSizeEstimateMaxBytes { + return -1 + } + return len(raw) + } +} + +func openAIWSPayloadString(payload map[string]any, key string) string { + if len(payload) == 0 { + return "" + } + raw, ok := payload[key] + if !ok { + return "" + } + switch v := raw.(type) { + case string: + return strings.TrimSpace(v) + default: + return strings.TrimSpace(fmt.Sprintf("%v", raw)) + } +} + +func extractOpenAIWSImageURL(value any) string { + switch v := value.(type) { + case string: + return strings.TrimSpace(v) + case map[string]any: + if raw, ok := v["url"].(string); ok { + return strings.TrimSpace(raw) + } + } + return "" +} + +func summarizeOpenAIWSInput(input any) string { + items, ok := input.([]any) + if !ok || len(items) == 0 { + return "-" + } + + itemCount := len(items) + textChars := 0 + imageDataURLs := 0 + imageDataURLChars := 0 + imageRemoteURLs := 0 + + handleContentItem := func(contentItem map[string]any) { + contentType, _ := contentItem["type"].(string) + switch strings.TrimSpace(contentType) { + case "input_text", "output_text", "text": + if text, ok := contentItem["text"].(string); ok { + textChars += len(text) + } + case "input_image": + imageURL := extractOpenAIWSImageURL(contentItem["image_url"]) + if imageURL == "" { + return + } + if 
strings.HasPrefix(strings.ToLower(imageURL), "data:image/") { + imageDataURLs++ + imageDataURLChars += len(imageURL) + return + } + imageRemoteURLs++ + } + } + + handleInputItem := func(inputItem map[string]any) { + if content, ok := inputItem["content"].([]any); ok { + for _, rawContent := range content { + contentItem, ok := rawContent.(map[string]any) + if !ok { + continue + } + handleContentItem(contentItem) + } + return + } + + itemType, _ := inputItem["type"].(string) + switch strings.TrimSpace(itemType) { + case "input_text", "output_text", "text": + if text, ok := inputItem["text"].(string); ok { + textChars += len(text) + } + case "input_image": + imageURL := extractOpenAIWSImageURL(inputItem["image_url"]) + if imageURL == "" { + return + } + if strings.HasPrefix(strings.ToLower(imageURL), "data:image/") { + imageDataURLs++ + imageDataURLChars += len(imageURL) + return + } + imageRemoteURLs++ + } + } + + for _, rawItem := range items { + inputItem, ok := rawItem.(map[string]any) + if !ok { + continue + } + handleInputItem(inputItem) + } + + return fmt.Sprintf( + "items=%d,text_chars=%d,image_data_urls=%d,image_data_url_chars=%d,image_remote_urls=%d", + itemCount, + textChars, + imageDataURLs, + imageDataURLChars, + imageRemoteURLs, + ) +} + +func dropOpenAIWSPayloadKey(payload map[string]any, key string, removed *[]string) { + if len(payload) == 0 || strings.TrimSpace(key) == "" { + return + } + if _, exists := payload[key]; !exists { + return + } + delete(payload, key) + *removed = append(*removed, key) +} + +// applyOpenAIWSRetryPayloadStrategy 在 WS 连续失败时仅移除无语义字段, +// 避免重试成功却改变原始请求语义。 +// 注意:prompt_cache_key 不应在重试中移除;它常用于会话稳定标识(session_id 兜底)。 +func applyOpenAIWSRetryPayloadStrategy(payload map[string]any, attempt int) (strategy string, removedKeys []string) { + if len(payload) == 0 { + return "empty", nil + } + if attempt <= 1 { + return "full", nil + } + + removed := make([]string, 0, 2) + if attempt >= 2 { + dropOpenAIWSPayloadKey(payload, "include", 
&removed) + } + + if len(removed) == 0 { + return "full", nil + } + sort.Strings(removed) + return "trim_optional_fields", removed +} + +func logOpenAIWSModeInfo(format string, args ...any) { + logger.LegacyPrintf("service.openai_gateway", "[OpenAI WS Mode] "+format, args...) +} + +func summarizeOpenAIWSReadCloseError(err error) (status string, reason string) { + if err == nil { + return "-", "-" + } + statusCode := coderws.CloseStatus(err) + if statusCode == -1 { + return "-", "-" + } + closeStatus := fmt.Sprintf("%d(%s)", int(statusCode), statusCode.String()) + closeReason := "-" + var closeErr coderws.CloseError + if errors.As(err, &closeErr) { + reasonText := strings.TrimSpace(closeErr.Reason) + if reasonText != "" { + closeReason = normalizeOpenAIWSLogValue(reasonText) + } + } + return normalizeOpenAIWSLogValue(closeStatus), closeReason +} + +func classifyOpenAIWSReadFallbackReason(err error) string { + if err == nil { + return "read_event" + } + switch coderws.CloseStatus(err) { + case coderws.StatusPolicyViolation: + return "policy_violation" + case coderws.StatusMessageTooBig: + return "message_too_big" + default: + return "read_event" + } +} + +func sortedKeys(m map[string]any) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + func (s *OpenAIGatewayService) getOpenAIWSConnPool() *openAIWSConnPool { if s == nil { return nil @@ -74,6 +504,25 @@ func (s *OpenAIGatewayService) SnapshotOpenAIWSPoolMetrics() OpenAIWSPoolMetrics return pool.SnapshotMetrics() } +type OpenAIWSPerformanceMetricsSnapshot struct { + Pool OpenAIWSPoolMetricsSnapshot `json:"pool"` + Retry OpenAIWSRetryMetricsSnapshot `json:"retry"` + Transport OpenAIWSTransportMetricsSnapshot `json:"transport"` +} + +func (s *OpenAIGatewayService) SnapshotOpenAIWSPerformanceMetrics() OpenAIWSPerformanceMetricsSnapshot { + pool := s.getOpenAIWSConnPool() + snapshot := 
OpenAIWSPerformanceMetricsSnapshot{ + Retry: s.SnapshotOpenAIWSRetryMetrics(), + } + if pool == nil { + return snapshot + } + snapshot.Pool = pool.SnapshotMetrics() + snapshot.Transport = pool.SnapshotTransportMetrics() + return snapshot +} + func (s *OpenAIGatewayService) getOpenAIWSStateStore() OpenAIWSStateStore { if s == nil { return nil @@ -110,6 +559,59 @@ func (s *OpenAIGatewayService) openAIWSWriteTimeout() time.Duration { return 2 * time.Minute } +func (s *OpenAIGatewayService) openAIWSEventFlushBatchSize() int { + if s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.EventFlushBatchSize > 0 { + return s.cfg.Gateway.OpenAIWS.EventFlushBatchSize + } + return openAIWSEventFlushBatchSizeDefault +} + +func (s *OpenAIGatewayService) openAIWSEventFlushInterval() time.Duration { + if s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.EventFlushIntervalMS >= 0 { + if s.cfg.Gateway.OpenAIWS.EventFlushIntervalMS == 0 { + return 0 + } + return time.Duration(s.cfg.Gateway.OpenAIWS.EventFlushIntervalMS) * time.Millisecond + } + return openAIWSEventFlushIntervalDefault +} + +func (s *OpenAIGatewayService) openAIWSPayloadLogSampleRate() float64 { + if s != nil && s.cfg != nil { + rate := s.cfg.Gateway.OpenAIWS.PayloadLogSampleRate + if rate < 0 { + return 0 + } + if rate > 1 { + return 1 + } + return rate + } + return openAIWSPayloadLogSampleDefault +} + +func (s *OpenAIGatewayService) shouldLogOpenAIWSPayloadSchema(attempt int) bool { + // 首次尝试保留一条完整 payload_schema 便于排障。 + if attempt <= 1 { + return true + } + rate := s.openAIWSPayloadLogSampleRate() + if rate <= 0 { + return false + } + if rate >= 1 { + return true + } + return rand.Float64() < rate +} + +func (s *OpenAIGatewayService) shouldEmitOpenAIWSPayloadSchema(attempt int) bool { + if !s.shouldLogOpenAIWSPayloadSchema(attempt) { + return false + } + return logger.L().Core().Enabled(zap.DebugLevel) +} + func (s *OpenAIGatewayService) openAIWSDialTimeout() time.Duration { if s != nil && s.cfg != nil && 
s.cfg.Gateway.OpenAIWS.DialTimeoutSeconds > 0 { return time.Duration(s.cfg.Gateway.OpenAIWS.DialTimeoutSeconds) * time.Second @@ -165,15 +667,23 @@ func (s *OpenAIGatewayService) buildOpenAIWSHeaders( isCodexCLI bool, turnState string, turnMetadata string, -) http.Header { + promptCacheKey string, +) (http.Header, openAIWSSessionHeaderResolution) { headers := make(http.Header) headers.Set("authorization", "Bearer "+token) + sessionResolution := resolveOpenAIWSSessionHeaders(c, promptCacheKey) if c != nil && c.Request != nil { if v := strings.TrimSpace(c.Request.Header.Get("accept-language")); v != "" { headers.Set("accept-language", v) } } + if sessionResolution.SessionID != "" { + headers.Set("session_id", sessionResolution.SessionID) + } + if sessionResolution.ConversationID != "" { + headers.Set("conversation_id", sessionResolution.ConversationID) + } if state := strings.TrimSpace(turnState); state != "" { headers.Set(openAIWSTurnStateHeader, state) } @@ -196,11 +706,7 @@ func (s *OpenAIGatewayService) buildOpenAIWSHeaders( if decision.Transport == OpenAIUpstreamTransportResponsesWebsocket { betaValue = openAIWSBetaV1Value } - if account != nil && account.Type == AccountTypeOAuth { - headers.Set("OpenAI-Beta", betaValue+",responses=experimental") - } else { - headers.Set("OpenAI-Beta", betaValue) - } + headers.Set("OpenAI-Beta", betaValue) customUA := "" if account != nil { @@ -220,17 +726,21 @@ func (s *OpenAIGatewayService) buildOpenAIWSHeaders( headers.Set("user-agent", codexCLIUserAgent) } - return headers + return headers, sessionResolution } func (s *OpenAIGatewayService) buildOpenAIWSCreatePayload(reqBody map[string]any, account *Account) map[string]any { + // OpenAI WS Mode 协议:response.create 字段与 HTTP /responses 基本一致。 + // 保留 stream 字段(与 Codex CLI 一致),仅移除 background。 payload := make(map[string]any, len(reqBody)+1) for k, v := range reqBody { payload[k] = v } - delete(payload, "stream") delete(payload, "background") + if _, exists := payload["stream"]; 
!exists { + payload["stream"] = true + } payload["type"] = "response.create" // OAuth 默认保持 store=false,避免误依赖服务端历史。 @@ -277,6 +787,60 @@ func (s *OpenAIGatewayService) isOpenAIWSStoreRecoveryAllowed(account *Account) return false } +func (s *OpenAIGatewayService) isOpenAIWSStoreDisabledInRequest(reqBody map[string]any, account *Account) bool { + if account != nil && account.Type == AccountTypeOAuth && !s.isOpenAIWSStoreRecoveryAllowed(account) { + return true + } + if len(reqBody) == 0 { + return false + } + rawStore, ok := reqBody["store"] + if !ok { + return false + } + storeEnabled, ok := rawStore.(bool) + if !ok { + return false + } + return !storeEnabled +} + +func (s *OpenAIGatewayService) openAIWSStoreDisabledConnMode() string { + if s == nil || s.cfg == nil { + return openAIWSStoreDisabledConnModeStrict + } + mode := strings.ToLower(strings.TrimSpace(s.cfg.Gateway.OpenAIWS.StoreDisabledConnMode)) + switch mode { + case openAIWSStoreDisabledConnModeStrict, openAIWSStoreDisabledConnModeAdaptive, openAIWSStoreDisabledConnModeOff: + return mode + case "": + // 兼容旧配置:仅配置了布尔开关时按旧语义推导。 + if s.cfg.Gateway.OpenAIWS.StoreDisabledForceNewConn { + return openAIWSStoreDisabledConnModeStrict + } + return openAIWSStoreDisabledConnModeOff + default: + return openAIWSStoreDisabledConnModeStrict + } +} + +func shouldForceNewConnOnStoreDisabled(mode, lastFailureReason string) bool { + switch mode { + case openAIWSStoreDisabledConnModeOff: + return false + case openAIWSStoreDisabledConnModeAdaptive: + reason := strings.TrimPrefix(strings.TrimSpace(lastFailureReason), "prewarm_") + switch reason { + case "policy_violation", "message_too_big", "auth_failed", "write_request", "write": + return true + default: + return false + } + default: + return true + } +} + func (s *OpenAIGatewayService) forwardOpenAIWSV2( ctx context.Context, c *gin.Context, @@ -289,6 +853,8 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( originalModel string, mappedModel string, startTime time.Time, + 
attempt int, + lastFailureReason string, ) (*OpenAIForwardResult, error) { if s == nil || account == nil { return nil, wrapOpenAIWSFallback("invalid_state", errors.New("service or account is nil")) @@ -298,13 +864,33 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( if err != nil { return nil, wrapOpenAIWSFallback("build_ws_url", err) } + wsHost := "-" + wsPath := "-" + if parsed, parseErr := url.Parse(wsURL); parseErr == nil && parsed != nil { + if h := strings.TrimSpace(parsed.Host); h != "" { + wsHost = normalizeOpenAIWSLogValue(h) + } + if p := strings.TrimSpace(parsed.Path); p != "" { + wsPath = normalizeOpenAIWSLogValue(p) + } + } + logOpenAIWSModeInfo( + "dial_target account_id=%d account_type=%s ws_host=%s ws_path=%s", + account.ID, + account.Type, + wsHost, + wsPath, + ) payload := s.buildOpenAIWSCreatePayload(reqBody, account) - previousResponseID := strings.TrimSpace(gjson.Get(payloadAsJSON(payload), "previous_response_id").String()) - - stateStore := s.getOpenAIWSStateStore() - groupID := getOpenAIGroupIDFromContext(c) - sessionHash := s.GenerateSessionHash(c, []byte(payloadAsJSON(payload))) + payloadStrategy, removedKeys := applyOpenAIWSRetryPayloadStrategy(payload, attempt) + previousResponseID := openAIWSPayloadString(payload, "previous_response_id") + promptCacheKey := openAIWSPayloadString(payload, "prompt_cache_key") + _, hasTools := payload["tools"] + streamValue := "-" + if raw, ok := payload["stream"]; ok { + streamValue = normalizeOpenAIWSLogValue(strings.TrimSpace(fmt.Sprintf("%v", raw))) + } turnState := "" turnMetadata := "" if c != nil && c.Request != nil { @@ -312,6 +898,33 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( turnMetadata = strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)) } setOpenAIWSTurnMetadata(payload, turnMetadata) + payloadJSON := payloadAsJSONBytes(payload) + payloadEventType := openAIWSPayloadString(payload, "type") + if payloadEventType == "" { + payloadEventType = "response.create" + } + if 
s.shouldEmitOpenAIWSPayloadSchema(attempt) { + logOpenAIWSModeInfo( + "[debug] payload_schema account_id=%d attempt=%d event=%s payload_keys=%s payload_bytes=%d payload_key_sizes=%s input_summary=%s stream=%s payload_strategy=%s removed_keys=%s has_previous_response_id=%v has_prompt_cache_key=%v has_tools=%v", + account.ID, + attempt, + payloadEventType, + normalizeOpenAIWSLogValue(strings.Join(sortedKeys(payload), ",")), + len(payloadJSON), + normalizeOpenAIWSLogValue(summarizeOpenAIWSPayloadKeySizes(payload, openAIWSPayloadKeySizeTopN)), + normalizeOpenAIWSLogValue(summarizeOpenAIWSInput(payload["input"])), + streamValue, + normalizeOpenAIWSLogValue(payloadStrategy), + normalizeOpenAIWSLogValue(strings.Join(removedKeys, ",")), + previousResponseID != "", + promptCacheKey != "", + hasTools, + ) + } + + stateStore := s.getOpenAIWSStateStore() + groupID := getOpenAIGroupIDFromContext(c) + sessionHash := s.GenerateSessionHash(c, payloadJSON) if turnState == "" && stateStore != nil && sessionHash != "" { if savedTurnState, ok := stateStore.GetSessionTurnState(groupID, sessionHash); ok { turnState = savedTurnState @@ -323,6 +936,47 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( preferredConnID = connID } } + storeDisabled := s.isOpenAIWSStoreDisabledInRequest(reqBody, account) + if stateStore != nil && storeDisabled && previousResponseID == "" && sessionHash != "" { + if connID, ok := stateStore.GetSessionConn(groupID, sessionHash); ok { + preferredConnID = connID + } + } + storeDisabledConnMode := s.openAIWSStoreDisabledConnMode() + forceNewConnByPolicy := shouldForceNewConnOnStoreDisabled(storeDisabledConnMode, lastFailureReason) + forceNewConn := forceNewConnByPolicy && storeDisabled && previousResponseID == "" && sessionHash != "" && preferredConnID == "" + wsHeaders, sessionResolution := s.buildOpenAIWSHeaders(c, account, token, decision, isCodexCLI, turnState, turnMetadata, promptCacheKey) + logOpenAIWSModeInfo( + "acquire_start account_id=%d 
account_type=%s transport=%s preferred_conn_id=%s has_previous_response_id=%v session_hash=%s has_turn_state=%v turn_state_len=%d has_turn_metadata=%v turn_metadata_len=%d store_disabled=%v store_disabled_conn_mode=%s retry_last_reason=%s force_new_conn=%v header_user_agent=%s header_openai_beta=%s header_originator=%s header_accept_language=%s header_session_id=%s header_conversation_id=%s session_id_source=%s conversation_id_source=%s has_prompt_cache_key=%v has_chatgpt_account_id=%v has_authorization=%v has_session_id=%v has_conversation_id=%v proxy_enabled=%v", + account.ID, + account.Type, + normalizeOpenAIWSLogValue(string(decision.Transport)), + truncateOpenAIWSLogValue(preferredConnID, openAIWSIDValueMaxLen), + previousResponseID != "", + truncateOpenAIWSLogValue(sessionHash, 12), + turnState != "", + len(turnState), + turnMetadata != "", + len(turnMetadata), + storeDisabled, + normalizeOpenAIWSLogValue(storeDisabledConnMode), + truncateOpenAIWSLogValue(lastFailureReason, openAIWSLogValueMaxLen), + forceNewConn, + openAIWSHeaderValueForLog(wsHeaders, "user-agent"), + openAIWSHeaderValueForLog(wsHeaders, "openai-beta"), + openAIWSHeaderValueForLog(wsHeaders, "originator"), + openAIWSHeaderValueForLog(wsHeaders, "accept-language"), + openAIWSHeaderValueForLog(wsHeaders, "session_id"), + openAIWSHeaderValueForLog(wsHeaders, "conversation_id"), + normalizeOpenAIWSLogValue(sessionResolution.SessionSource), + normalizeOpenAIWSLogValue(sessionResolution.ConversationSource), + promptCacheKey != "", + hasOpenAIWSHeader(wsHeaders, "chatgpt-account-id"), + hasOpenAIWSHeader(wsHeaders, "authorization"), + hasOpenAIWSHeader(wsHeaders, "session_id"), + hasOpenAIWSHeader(wsHeaders, "conversation_id"), + account.ProxyID != nil && account.Proxy != nil, + ) acquireCtx, acquireCancel := context.WithTimeout(ctx, s.openAIWSDialTimeout()+s.openAIWSWriteTimeout()) defer acquireCancel() @@ -330,8 +984,9 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( lease, err := 
s.getOpenAIWSConnPool().Acquire(acquireCtx, openAIWSAcquireRequest{ Account: account, WSURL: wsURL, - Headers: s.buildOpenAIWSHeaders(c, account, token, decision, isCodexCLI, turnState, turnMetadata), + Headers: wsHeaders, PreferredConnID: preferredConnID, + ForceNewConn: forceNewConn, ProxyURL: func() string { if account.ProxyID != nil && account.Proxy != nil { return account.Proxy.URL() @@ -340,18 +995,53 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( }(), }) if err != nil { + dialStatus := 0 + var dialErr *openAIWSDialError + if errors.As(err, &dialErr) { + dialStatus = dialErr.StatusCode + } + logOpenAIWSModeInfo( + "acquire_fail account_id=%d account_type=%s transport=%s reason=%s dial_status=%d cause=%s preferred_conn_id=%s", + account.ID, + account.Type, + normalizeOpenAIWSLogValue(string(decision.Transport)), + normalizeOpenAIWSLogValue(classifyOpenAIWSAcquireError(err)), + dialStatus, + truncateOpenAIWSLogValue(err.Error(), openAIWSLogValueMaxLen), + truncateOpenAIWSLogValue(preferredConnID, openAIWSIDValueMaxLen), + ) return nil, wrapOpenAIWSFallback(classifyOpenAIWSAcquireError(err), err) } defer lease.Release() + connID := strings.TrimSpace(lease.ConnID()) + logOpenAIWSModeInfo( + "connected account_id=%d account_type=%s transport=%s conn_id=%s conn_reused=%v conn_pick_ms=%d queue_wait_ms=%d has_previous_response_id=%v", + account.ID, + account.Type, + normalizeOpenAIWSLogValue(string(decision.Transport)), + connID, + lease.Reused(), + lease.ConnPickDuration().Milliseconds(), + lease.QueueWaitDuration().Milliseconds(), + previousResponseID != "", + ) if c != nil { + SetOpsLatencyMs(c, OpsOpenAIWSConnPickMsKey, lease.ConnPickDuration().Milliseconds()) SetOpsLatencyMs(c, OpsOpenAIWSQueueWaitMsKey, lease.QueueWaitDuration().Milliseconds()) c.Set(OpsOpenAIWSConnReusedKey, lease.Reused()) - if connID := strings.TrimSpace(lease.ConnID()); connID != "" { + if connID != "" { c.Set(OpsOpenAIWSConnIDKey, connID) } } handshakeTurnState := 
strings.TrimSpace(lease.HandshakeHeader(openAIWSTurnStateHeader)) + logOpenAIWSModeInfo( + "handshake account_id=%d conn_id=%s has_turn_state=%v turn_state_len=%d", + account.ID, + connID, + handshakeTurnState != "", + len(handshakeTurnState), + ) if handshakeTurnState != "" { if stateStore != nil && sessionHash != "" { stateStore.BindSessionTurnState(groupID, sessionHash, handshakeTurnState, s.openAIWSSessionStickyTTL()) @@ -377,8 +1067,23 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( if err := lease.WriteJSON(payload, s.openAIWSWriteTimeout()); err != nil { lease.MarkBroken() + logOpenAIWSModeInfo( + "write_request_fail account_id=%d conn_id=%s cause=%s payload_bytes=%d", + account.ID, + connID, + truncateOpenAIWSLogValue(err.Error(), openAIWSLogValueMaxLen), + len(payloadJSON), + ) return nil, wrapOpenAIWSFallback("write_request", err) } + logOpenAIWSModeInfo( + "write_request_sent account_id=%d conn_id=%s stream=%v payload_bytes=%d previous_response_id=%s", + account.ID, + connID, + reqStream, + len(payloadJSON), + truncateOpenAIWSLogValue(previousResponseID, openAIWSIDValueMaxLen), + ) usage := &OpenAIUsage{} var firstTokenMs *int @@ -386,6 +1091,14 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( var finalResponse []byte wroteDownstream := false needModelReplace := originalModel != mappedModel + bufferedStreamEvents := make([][]byte, 0, 4) + eventCount := 0 + tokenEventCount := 0 + terminalEventCount := 0 + bufferedEventCount := 0 + flushedBufferedEventCount := 0 + firstEventType := "" + lastEventType := "" var flusher http.Flusher if reqStream { @@ -405,12 +1118,83 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( } clientDisconnected := false + flushBatchSize := s.openAIWSEventFlushBatchSize() + flushInterval := s.openAIWSEventFlushInterval() + pendingFlushEvents := 0 + lastFlushAt := time.Now() + flushStreamWriter := func(force bool) { + if clientDisconnected || flusher == nil || pendingFlushEvents <= 0 { + return + } + if !force && 
flushBatchSize > 1 && pendingFlushEvents < flushBatchSize { + if flushInterval <= 0 || time.Since(lastFlushAt) < flushInterval { + return + } + } + flusher.Flush() + pendingFlushEvents = 0 + lastFlushAt = time.Now() + } + emitStreamMessage := func(message []byte, forceFlush bool) { + if clientDisconnected { + return + } + if _, wErr := fmt.Fprintf(c.Writer, "data: %s\n\n", message); wErr == nil { + wroteDownstream = true + pendingFlushEvents++ + flushStreamWriter(forceFlush) + return + } + clientDisconnected = true + logger.LegacyPrintf("service.openai_gateway", "[OpenAI WS Mode] client disconnected, continue draining upstream: account=%d", account.ID) + } + flushBufferedStreamEvents := func(reason string) { + if len(bufferedStreamEvents) == 0 { + return + } + flushed := len(bufferedStreamEvents) + for _, buffered := range bufferedStreamEvents { + emitStreamMessage(buffered, false) + } + bufferedStreamEvents = bufferedStreamEvents[:0] + flushStreamWriter(true) + flushedBufferedEventCount += flushed + logOpenAIWSModeInfo( + "buffer_flush account_id=%d conn_id=%s reason=%s flushed=%d total_flushed=%d client_disconnected=%v", + account.ID, + connID, + truncateOpenAIWSLogValue(reason, openAIWSLogValueMaxLen), + flushed, + flushedBufferedEventCount, + clientDisconnected, + ) + } + + readTimeout := s.openAIWSReadTimeout() + for { - message, readErr := lease.ReadMessage(s.openAIWSReadTimeout()) + message, readErr := lease.ReadMessageWithContextTimeout(ctx, readTimeout) if readErr != nil { lease.MarkBroken() + closeStatus, closeReason := summarizeOpenAIWSReadCloseError(readErr) + logOpenAIWSModeInfo( + "read_fail account_id=%d conn_id=%s wrote_downstream=%v close_status=%s close_reason=%s cause=%s events=%d token_events=%d terminal_events=%d buffered_pending=%d buffered_flushed=%d first_event=%s last_event=%s", + account.ID, + connID, + wroteDownstream, + closeStatus, + closeReason, + truncateOpenAIWSLogValue(readErr.Error(), openAIWSLogValueMaxLen), + eventCount, + 
tokenEventCount, + terminalEventCount, + len(bufferedStreamEvents), + flushedBufferedEventCount, + truncateOpenAIWSLogValue(firstEventType, openAIWSLogValueMaxLen), + truncateOpenAIWSLogValue(lastEventType, openAIWSLogValueMaxLen), + ) if !wroteDownstream { - return nil, wrapOpenAIWSFallback("read_event", readErr) + return nil, wrapOpenAIWSFallback(classifyOpenAIWSReadFallbackReason(readErr), readErr) } if clientDisconnected { break @@ -423,23 +1207,49 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( if eventType == "" { continue } + eventCount++ + if firstEventType == "" { + firstEventType = eventType + } + lastEventType = eventType if responseID == "" { responseID = extractOpenAIWSResponseID(message) } - if firstTokenMs == nil && isOpenAIWSTokenEvent(eventType) { + isTokenEvent := isOpenAIWSTokenEvent(eventType) + if isTokenEvent { + tokenEventCount++ + } + isTerminalEvent := isOpenAIWSTerminalEvent(eventType) + if isTerminalEvent { + terminalEventCount++ + } + if firstTokenMs == nil && isTokenEvent { ms := int(time.Since(startTime).Milliseconds()) firstTokenMs = &ms } + if shouldLogOpenAIWSEvent(eventCount, eventType) { + logOpenAIWSModeInfo( + "event_received account_id=%d conn_id=%s idx=%d type=%s bytes=%d token=%v terminal=%v buffered_pending=%d", + account.ID, + connID, + eventCount, + truncateOpenAIWSLogValue(eventType, openAIWSLogValueMaxLen), + len(message), + isTokenEvent, + isTerminalEvent, + len(bufferedStreamEvents), + ) + } if needModelReplace { message = replaceOpenAIWSMessageModel(message, mappedModel, originalModel) } - if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEData(string(message)); changed { - message = []byte(corrected) + if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(message); changed { + message = corrected } - s.parseSSEUsage(string(message), usage) + s.parseSSEUsageBytes(message, usage) if eventType == "error" { errMsg := strings.TrimSpace(gjson.GetBytes(message, "error.message").String()) @@ 
-447,15 +1257,26 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( errMsg = "Upstream websocket error" } fallbackReason, canFallback := classifyOpenAIWSErrorEvent(message) + errCode, errType, errMessage := summarizeOpenAIWSErrorEventFields(message) + logOpenAIWSModeInfo( + "error_event account_id=%d conn_id=%s idx=%d fallback_reason=%s can_fallback=%v err_code=%s err_type=%s err_message=%s", + account.ID, + connID, + eventCount, + truncateOpenAIWSLogValue(fallbackReason, openAIWSLogValueMaxLen), + canFallback, + errCode, + errType, + errMessage, + ) if !wroteDownstream && canFallback { return nil, wrapOpenAIWSFallback(fallbackReason, errors.New(errMsg)) } statusCode := openAIWSErrorHTTPStatus(message) setOpsUpstreamError(c, statusCode, errMsg, "") if reqStream && !clientDisconnected { - if _, wErr := fmt.Fprintf(c.Writer, "data: %s\n\n", message); wErr == nil { - flusher.Flush() - } + flushBufferedStreamEvents("error_event") + emitStreamMessage(message, true) } if !reqStream { c.JSON(statusCode, gin.H{ @@ -469,14 +1290,28 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( } if reqStream { - if !clientDisconnected { - if _, wErr := fmt.Fprintf(c.Writer, "data: %s\n\n", message); wErr == nil { - wroteDownstream = true - flusher.Flush() - } else { - clientDisconnected = true - logger.LegacyPrintf("service.openai_gateway", "[OpenAI WS] client disconnected, continue draining upstream: account=%d", account.ID) + // 在首个 token 前先缓冲事件(如 response.created), + // 以便上游早期断连时仍可安全回退到 HTTP,不给下游发送半截流。 + shouldBuffer := firstTokenMs == nil && !isTokenEvent && !isTerminalEvent + if shouldBuffer { + buffered := make([]byte, len(message)) + copy(buffered, message) + bufferedStreamEvents = append(bufferedStreamEvents, buffered) + bufferedEventCount++ + if shouldLogOpenAIWSBufferedEvent(bufferedEventCount) { + logOpenAIWSModeInfo( + "buffer_enqueue account_id=%d conn_id=%s idx=%d event_idx=%d event_type=%s buffer_size=%d", + account.ID, + connID, + bufferedEventCount, + eventCount, + 
truncateOpenAIWSLogValue(eventType, openAIWSLogValueMaxLen), + len(bufferedStreamEvents), + ) } + } else { + flushBufferedStreamEvents(eventType) + emitStreamMessage(message, isTerminalEvent) } } else { if response := gjson.GetBytes(message, "response"); response.Exists() && response.Type == gjson.JSON { @@ -484,7 +1319,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( } } - if isOpenAIWSTerminalEvent(eventType) { + if isTerminalEvent { if !reqStream { if len(finalResponse) == 0 { if resp := gjson.GetBytes(message, "response"); resp.Exists() && resp.Type == gjson.JSON { @@ -498,6 +1333,15 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( if !reqStream { if len(finalResponse) == 0 { + logOpenAIWSModeInfo( + "missing_final_response account_id=%d conn_id=%s events=%d token_events=%d terminal_events=%d wrote_downstream=%v", + account.ID, + connID, + eventCount, + tokenEventCount, + terminalEventCount, + wroteDownstream, + ) if !wroteDownstream { return nil, wrapOpenAIWSFallback("missing_final_response", errors.New("no terminal response payload")) } @@ -514,6 +1358,8 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( } c.Data(http.StatusOK, "application/json", finalResponse) + } else { + flushStreamWriter(true) } if responseID != "" && stateStore != nil { @@ -521,6 +1367,31 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( _ = stateStore.BindResponseAccount(ctx, groupID, responseID, account.ID, ttl) stateStore.BindResponseConn(responseID, lease.ConnID(), ttl) } + if stateStore != nil && storeDisabled && sessionHash != "" { + stateStore.BindSessionConn(groupID, sessionHash, lease.ConnID(), s.openAIWSSessionStickyTTL()) + } + firstTokenMsValue := -1 + if firstTokenMs != nil { + firstTokenMsValue = *firstTokenMs + } + logOpenAIWSModeInfo( + "completed account_id=%d conn_id=%s response_id=%s stream=%v duration_ms=%d events=%d token_events=%d terminal_events=%d buffered_events=%d buffered_flushed=%d first_event=%s last_event=%s first_token_ms=%d 
wrote_downstream=%v client_disconnected=%v", + account.ID, + connID, + truncateOpenAIWSLogValue(strings.TrimSpace(responseID), openAIWSIDValueMaxLen), + reqStream, + time.Since(startTime).Milliseconds(), + eventCount, + tokenEventCount, + terminalEventCount, + bufferedEventCount, + flushedBufferedEventCount, + truncateOpenAIWSLogValue(firstEventType, openAIWSLogValueMaxLen), + truncateOpenAIWSLogValue(lastEventType, openAIWSLogValueMaxLen), + firstTokenMsValue, + wroteDownstream, + clientDisconnected, + ) return &OpenAIForwardResult{ RequestID: responseID, @@ -550,51 +1421,103 @@ func (s *OpenAIGatewayService) performOpenAIWSGeneratePrewarm( stateStore OpenAIWSStateStore, groupID int64, ) error { - if s == nil || lease == nil || account == nil { + if s == nil { return nil } + if lease == nil || account == nil { + logOpenAIWSModeInfo("prewarm_skip reason=invalid_state has_lease=%v has_account=%v", lease != nil, account != nil) + return nil + } + connID := strings.TrimSpace(lease.ConnID()) if !s.isOpenAIWSGeneratePrewarmEnabled() { return nil } if decision.Transport != OpenAIUpstreamTransportResponsesWebsocketV2 { + logOpenAIWSModeInfo( + "prewarm_skip account_id=%d conn_id=%s reason=transport_not_v2 transport=%s", + account.ID, + connID, + normalizeOpenAIWSLogValue(string(decision.Transport)), + ) return nil } if strings.TrimSpace(previousResponseID) != "" { + logOpenAIWSModeInfo( + "prewarm_skip account_id=%d conn_id=%s reason=has_previous_response_id previous_response_id=%s", + account.ID, + connID, + truncateOpenAIWSLogValue(previousResponseID, openAIWSIDValueMaxLen), + ) return nil } if lease.IsPrewarmed() { + logOpenAIWSModeInfo("prewarm_skip account_id=%d conn_id=%s reason=already_prewarmed", account.ID, connID) return nil } if NeedsToolContinuation(reqBody) { + logOpenAIWSModeInfo("prewarm_skip account_id=%d conn_id=%s reason=tool_continuation", account.ID, connID) return nil } + prewarmStart := time.Now() + logOpenAIWSModeInfo("prewarm_start account_id=%d 
conn_id=%s", account.ID, connID) prewarmPayload := make(map[string]any, len(payload)+1) for k, v := range payload { prewarmPayload[k] = v } prewarmPayload["generate"] = false + prewarmPayloadJSON := payloadAsJSONBytes(prewarmPayload) if err := lease.WriteJSON(prewarmPayload, s.openAIWSWriteTimeout()); err != nil { lease.MarkBroken() + logOpenAIWSModeInfo( + "prewarm_write_fail account_id=%d conn_id=%s cause=%s", + account.ID, + connID, + truncateOpenAIWSLogValue(err.Error(), openAIWSLogValueMaxLen), + ) return wrapOpenAIWSFallback("prewarm_write", err) } + logOpenAIWSModeInfo("prewarm_write_sent account_id=%d conn_id=%s payload_bytes=%d", account.ID, connID, len(prewarmPayloadJSON)) prewarmResponseID := "" + prewarmEventCount := 0 + prewarmTerminalCount := 0 for { - message, readErr := lease.ReadMessage(s.openAIWSReadTimeout()) + message, readErr := lease.ReadMessageWithContextTimeout(ctx, s.openAIWSReadTimeout()) if readErr != nil { lease.MarkBroken() - return wrapOpenAIWSFallback("prewarm_read_event", readErr) + closeStatus, closeReason := summarizeOpenAIWSReadCloseError(readErr) + logOpenAIWSModeInfo( + "prewarm_read_fail account_id=%d conn_id=%s close_status=%s close_reason=%s cause=%s events=%d", + account.ID, + connID, + closeStatus, + closeReason, + truncateOpenAIWSLogValue(readErr.Error(), openAIWSLogValueMaxLen), + prewarmEventCount, + ) + return wrapOpenAIWSFallback("prewarm_"+classifyOpenAIWSReadFallbackReason(readErr), readErr) } eventType := strings.TrimSpace(gjson.GetBytes(message, "type").String()) if eventType == "" { continue } + prewarmEventCount++ if prewarmResponseID == "" { prewarmResponseID = extractOpenAIWSResponseID(message) } + if prewarmEventCount <= openAIWSPrewarmEventLogHead || eventType == "error" || isOpenAIWSTerminalEvent(eventType) { + logOpenAIWSModeInfo( + "prewarm_event account_id=%d conn_id=%s idx=%d type=%s bytes=%d", + account.ID, + connID, + prewarmEventCount, + truncateOpenAIWSLogValue(eventType, openAIWSLogValueMaxLen), + 
len(message), + ) + } if eventType == "error" { errMsg := strings.TrimSpace(gjson.GetBytes(message, "error.message").String()) @@ -602,6 +1525,18 @@ func (s *OpenAIGatewayService) performOpenAIWSGeneratePrewarm( errMsg = "OpenAI websocket prewarm error" } fallbackReason, canFallback := classifyOpenAIWSErrorEvent(message) + errCode, errType, errMessage := summarizeOpenAIWSErrorEventFields(message) + logOpenAIWSModeInfo( + "prewarm_error_event account_id=%d conn_id=%s idx=%d fallback_reason=%s can_fallback=%v err_code=%s err_type=%s err_message=%s", + account.ID, + connID, + prewarmEventCount, + truncateOpenAIWSLogValue(fallbackReason, openAIWSLogValueMaxLen), + canFallback, + errCode, + errType, + errMessage, + ) lease.MarkBroken() if canFallback { return wrapOpenAIWSFallback("prewarm_"+fallbackReason, errors.New(errMsg)) @@ -610,6 +1545,7 @@ func (s *OpenAIGatewayService) performOpenAIWSGeneratePrewarm( } if isOpenAIWSTerminalEvent(eventType) { + prewarmTerminalCount++ break } } @@ -620,18 +1556,31 @@ func (s *OpenAIGatewayService) performOpenAIWSGeneratePrewarm( _ = stateStore.BindResponseAccount(ctx, groupID, prewarmResponseID, account.ID, ttl) stateStore.BindResponseConn(prewarmResponseID, lease.ConnID(), ttl) } + logOpenAIWSModeInfo( + "prewarm_done account_id=%d conn_id=%s response_id=%s events=%d terminal_events=%d duration_ms=%d", + account.ID, + connID, + truncateOpenAIWSLogValue(prewarmResponseID, openAIWSIDValueMaxLen), + prewarmEventCount, + prewarmTerminalCount, + time.Since(prewarmStart).Milliseconds(), + ) return nil } func payloadAsJSON(payload map[string]any) string { + return string(payloadAsJSONBytes(payload)) +} + +func payloadAsJSONBytes(payload map[string]any) []byte { if len(payload) == 0 { - return "{}" + return []byte("{}") } body, err := json.Marshal(payload) if err != nil { - return "{}" + return []byte("{}") } - return string(body) + return body } func extractOpenAIWSResponseID(message []byte) string { @@ -793,26 +1742,6 @@ func (s 
*OpenAIGatewayService) SelectAccountByPreviousResponseID( return nil, nil } -func (s *OpenAIGatewayService) logOpenAIWSFallback(ctx context.Context, account *Account, reason string, err error) { - if s == nil { - return - } - fields := []zap.Field{ - zap.String("component", "service.openai_gateway"), - zap.String("reason", strings.TrimSpace(reason)), - } - if account != nil { - fields = append(fields, - zap.Int64("account_id", account.ID), - zap.String("account_type", string(account.Type)), - ) - } - if err != nil { - fields = append(fields, zap.Error(err)) - } - logger.FromContext(ctx).With(fields...).Warn("OpenAI WS 回退到 HTTP") -} - func classifyOpenAIWSAcquireError(err error) string { if err == nil { return "acquire_conn" @@ -854,6 +1783,10 @@ func classifyOpenAIWSErrorEvent(message []byte) (string, bool) { return "upgrade_required", true case "websocket_not_supported", "websocket_unsupported": return "ws_unsupported", true + case "websocket_connection_limit_reached": + return "ws_connection_limit_reached", true + case "previous_response_not_found": + return "previous_response_not_found", true } if strings.Contains(msg, "upgrade required") || strings.Contains(msg, "status 426") { return "upgrade_required", true @@ -864,6 +1797,13 @@ func classifyOpenAIWSErrorEvent(message []byte) (string, bool) { if strings.Contains(msg, "websocket") && strings.Contains(msg, "unsupported") { return "ws_unsupported", true } + if strings.Contains(msg, "connection limit") && strings.Contains(msg, "websocket") { + return "ws_connection_limit_reached", true + } + if strings.Contains(msg, "previous_response_not_found") || + (strings.Contains(msg, "previous response") && strings.Contains(msg, "not found")) { + return "previous_response_not_found", true + } if strings.Contains(errType, "server_error") || strings.Contains(code, "server_error") { return "upstream_error_event", true } diff --git a/backend/internal/service/openai_ws_forwarder_benchmark_test.go 
b/backend/internal/service/openai_ws_forwarder_benchmark_test.go new file mode 100644 index 000000000..dd2d0ae97 --- /dev/null +++ b/backend/internal/service/openai_ws_forwarder_benchmark_test.go @@ -0,0 +1,77 @@ +package service + +import ( + "fmt" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" +) + +var ( + benchmarkOpenAIWSPayloadJSONSink string + benchmarkOpenAIWSStringSink string + benchmarkOpenAIWSBoolSink bool +) + +func BenchmarkOpenAIWSForwarderHotPath(b *testing.B) { + cfg := &config.Config{} + svc := &OpenAIGatewayService{cfg: cfg} + account := &Account{ID: 1, Platform: PlatformOpenAI, Type: AccountTypeOAuth} + reqBody := benchmarkOpenAIWSHotPathRequest() + + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + payload := svc.buildOpenAIWSCreatePayload(reqBody, account) + _, _ = applyOpenAIWSRetryPayloadStrategy(payload, 2) + setOpenAIWSTurnMetadata(payload, `{"trace":"bench","turn":"1"}`) + + benchmarkOpenAIWSStringSink = openAIWSPayloadString(payload, "previous_response_id") + benchmarkOpenAIWSBoolSink = payload["tools"] != nil + benchmarkOpenAIWSStringSink = summarizeOpenAIWSPayloadKeySizes(payload, openAIWSPayloadKeySizeTopN) + benchmarkOpenAIWSStringSink = summarizeOpenAIWSInput(payload["input"]) + benchmarkOpenAIWSPayloadJSONSink = payloadAsJSON(payload) + } +} + +func benchmarkOpenAIWSHotPathRequest() map[string]any { + tools := make([]map[string]any, 0, 24) + for i := 0; i < 24; i++ { + tools = append(tools, map[string]any{ + "type": "function", + "name": fmt.Sprintf("tool_%02d", i), + "description": "benchmark tool schema", + "parameters": map[string]any{ + "type": "object", + "properties": map[string]any{ + "query": map[string]any{"type": "string"}, + "limit": map[string]any{"type": "number"}, + }, + "required": []string{"query"}, + }, + }) + } + + input := make([]map[string]any, 0, 16) + for i := 0; i < 16; i++ { + input = append(input, map[string]any{ + "role": "user", + "type": "message", + "content": 
fmt.Sprintf("benchmark message %d", i), + }) + } + + return map[string]any{ + "type": "response.create", + "model": "gpt-5.3-codex", + "input": input, + "tools": tools, + "parallel_tool_calls": true, + "previous_response_id": "resp_benchmark_prev", + "prompt_cache_key": "bench-cache-key", + "reasoning": map[string]any{"effort": "medium"}, + "instructions": "benchmark instructions", + "store": false, + } +} diff --git a/backend/internal/service/openai_ws_forwarder_retry_payload_test.go b/backend/internal/service/openai_ws_forwarder_retry_payload_test.go new file mode 100644 index 000000000..0ea7e1c72 --- /dev/null +++ b/backend/internal/service/openai_ws_forwarder_retry_payload_test.go @@ -0,0 +1,50 @@ +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestApplyOpenAIWSRetryPayloadStrategy_KeepPromptCacheKey(t *testing.T) { + payload := map[string]any{ + "model": "gpt-5.3-codex", + "prompt_cache_key": "pcache_123", + "include": []any{"reasoning.encrypted_content"}, + "text": map[string]any{ + "verbosity": "low", + }, + "tools": []any{map[string]any{"type": "function"}}, + } + + strategy, removed := applyOpenAIWSRetryPayloadStrategy(payload, 3) + require.Equal(t, "trim_optional_fields", strategy) + require.Contains(t, removed, "include") + require.NotContains(t, removed, "prompt_cache_key") + require.Equal(t, "pcache_123", payload["prompt_cache_key"]) + require.NotContains(t, payload, "include") + require.Contains(t, payload, "text") +} + +func TestApplyOpenAIWSRetryPayloadStrategy_AttemptSixKeepsSemanticFields(t *testing.T) { + payload := map[string]any{ + "prompt_cache_key": "pcache_456", + "instructions": "long instructions", + "tools": []any{map[string]any{"type": "function"}}, + "parallel_tool_calls": true, + "tool_choice": "auto", + "include": []any{"reasoning.encrypted_content"}, + "text": map[string]any{"verbosity": "high"}, + } + + strategy, removed := applyOpenAIWSRetryPayloadStrategy(payload, 6) + require.Equal(t, 
"trim_optional_fields", strategy) + require.Contains(t, removed, "include") + require.NotContains(t, removed, "prompt_cache_key") + require.Equal(t, "pcache_456", payload["prompt_cache_key"]) + require.Contains(t, payload, "instructions") + require.Contains(t, payload, "tools") + require.Contains(t, payload, "tool_choice") + require.Contains(t, payload, "parallel_tool_calls") + require.Contains(t, payload, "text") +} diff --git a/backend/internal/service/openai_ws_forwarder_success_test.go b/backend/internal/service/openai_ws_forwarder_success_test.go index ceb29928d..95ef85d9a 100644 --- a/backend/internal/service/openai_ws_forwarder_success_test.go +++ b/backend/internal/service/openai_ws_forwarder_success_test.go @@ -11,6 +11,7 @@ import ( "sync" "sync/atomic" "testing" + "time" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/gin-gonic/gin" @@ -25,6 +26,8 @@ func TestOpenAIGatewayService_Forward_WSv2_SuccessAndBindSticky(t *testing.T) { type receivedPayload struct { Type string PreviousResponseID string + StreamExists bool + Stream bool } receivedCh := make(chan receivedPayload, 1) @@ -44,9 +47,12 @@ func TestOpenAIGatewayService_Forward_WSv2_SuccessAndBindSticky(t *testing.T) { t.Errorf("read ws request failed: %v", err) return } + requestJSON := requestToJSONString(request) receivedCh <- receivedPayload{ - Type: strings.TrimSpace(gjson.Get(requestToJSONString(request), "type").String()), - PreviousResponseID: strings.TrimSpace(gjson.Get(requestToJSONString(request), "previous_response_id").String()), + Type: strings.TrimSpace(gjson.Get(requestJSON, "type").String()), + PreviousResponseID: strings.TrimSpace(gjson.Get(requestJSON, "previous_response_id").String()), + StreamExists: gjson.Get(requestJSON, "stream").Exists(), + Stream: gjson.Get(requestJSON, "stream").Bool(), } if err := conn.WriteJSON(map[string]any{ @@ -147,6 +153,8 @@ func TestOpenAIGatewayService_Forward_WSv2_SuccessAndBindSticky(t *testing.T) { received := <-receivedCh 
require.Equal(t, "response.create", received.Type) require.Equal(t, "resp_prev_1", received.PreviousResponseID) + require.True(t, received.StreamExists, "WS 请求应携带 stream 字段") + require.False(t, received.Stream, "应保持客户端 stream=false 的原始语义") store := svc.getOpenAIWSStateStore() mappedAccountID, getErr := store.GetResponseAccount(context.Background(), groupID, "resp_new_1") @@ -278,6 +286,7 @@ func TestOpenAIGatewayService_Forward_WSv2_PoolReuseNotOneToOne(t *testing.T) { require.Equal(t, int64(1), upgradeCount.Load(), "多个客户端请求应复用账号连接池而不是 1:1 对等建链") metrics := svc.SnapshotOpenAIWSPoolMetrics() require.GreaterOrEqual(t, metrics.AcquireReuseTotal, int64(1)) + require.GreaterOrEqual(t, metrics.ConnPickTotal, int64(1)) } func TestOpenAIGatewayService_Forward_WSv2_OAuthStoreFalseByDefault(t *testing.T) { @@ -287,6 +296,8 @@ func TestOpenAIGatewayService_Forward_WSv2_OAuthStoreFalseByDefault(t *testing.T c, _ := gin.CreateTestContext(rec) c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) c.Request.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + c.Request.Header.Set("session_id", "sess-oauth-1") + c.Request.Header.Set("conversation_id", "conv-oauth-1") cfg := &config.Config{} cfg.Security.URLAllowlist.Enabled = false @@ -343,7 +354,75 @@ func TestOpenAIGatewayService_Forward_WSv2_OAuthStoreFalseByDefault(t *testing.T requestJSON := requestToJSONString(captureConn.lastWrite) require.True(t, gjson.Get(requestJSON, "store").Exists(), "OAuth WSv2 应显式写入 store 字段") require.False(t, gjson.Get(requestJSON, "store").Bool(), "默认策略应将 OAuth store 置为 false") - require.Contains(t, captureDialer.lastHeaders.Get("OpenAI-Beta"), "responses=experimental") + require.True(t, gjson.Get(requestJSON, "stream").Exists(), "WSv2 payload 应保留 stream 字段") + require.True(t, gjson.Get(requestJSON, "stream").Bool(), "OAuth Codex 规范化后应强制 stream=true") + require.Equal(t, openAIWSBetaV2Value, captureDialer.lastHeaders.Get("OpenAI-Beta")) + require.Equal(t, "sess-oauth-1", 
captureDialer.lastHeaders.Get("session_id")) + require.Equal(t, "conv-oauth-1", captureDialer.lastHeaders.Get("conversation_id")) +} + +func TestOpenAIGatewayService_Forward_WSv2_HeaderSessionFallbackFromPromptCacheKey(t *testing.T) { + gin.SetMode(gin.TestMode) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + + captureConn := &openAIWSCaptureConn{ + events: [][]byte{ + []byte(`{"type":"response.completed","response":{"id":"resp_prompt_cache_key","model":"gpt-5.1","usage":{"input_tokens":2,"output_tokens":1}}}`), + }, + } + captureDialer := &openAIWSCaptureDialer{conn: captureConn} + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(captureDialer) + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + openaiWSPool: pool, + } + account := &Account{ + ID: 31, + Name: "openai-oauth", + Platform: PlatformOpenAI, + Type: AccountTypeOAuth, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Credentials: map[string]any{ + "access_token": "oauth-token-1", + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":true,"prompt_cache_key":"pcache_123","input":[{"type":"input_text","text":"hi"}]}`) + result, err := 
svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, "resp_prompt_cache_key", result.RequestID) + + require.Equal(t, "pcache_123", captureDialer.lastHeaders.Get("session_id")) + require.Empty(t, captureDialer.lastHeaders.Get("conversation_id")) + require.NotNil(t, captureConn.lastWrite) + require.True(t, gjson.Get(requestToJSONString(captureConn.lastWrite), "stream").Exists()) } func TestOpenAIGatewayService_Forward_WSv1_Unsupported(t *testing.T) { @@ -594,6 +673,56 @@ func TestOpenAIGatewayService_Forward_WSv2_GeneratePrewarm(t *testing.T) { require.False(t, gjson.Get(secondWrite, "generate").Exists()) } +func TestOpenAIGatewayService_PrewarmReadHonorsParentContext(t *testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.PrewarmGenerateEnabled = true + cfg.Gateway.OpenAIWS.ReadTimeoutSeconds = 5 + cfg.Gateway.OpenAIWS.WriteTimeoutSeconds = 3 + + svc := &OpenAIGatewayService{ + cfg: cfg, + toolCorrector: NewCodexToolCorrector(), + } + account := &Account{ + ID: 601, + Name: "openai-prewarm-timeout", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + } + conn := newOpenAIWSConn("prewarm_ctx_conn", account.ID, &openAIWSBlockingConn{ + readDelay: 200 * time.Millisecond, + }, nil) + lease := &openAIWSConnLease{ + accountID: account.ID, + conn: conn, + } + payload := map[string]any{ + "type": "response.create", + "model": "gpt-5.1", + } + + ctx, cancel := context.WithTimeout(context.Background(), 40*time.Millisecond) + defer cancel() + start := time.Now() + err := svc.performOpenAIWSGeneratePrewarm( + ctx, + lease, + OpenAIWSProtocolDecision{Transport: OpenAIUpstreamTransportResponsesWebsocketV2}, + payload, + "", + map[string]any{"model": "gpt-5.1"}, + account, + nil, + 0, + ) + elapsed := time.Since(start) + require.Error(t, err) + require.Contains(t, err.Error(), "prewarm_read_event") + require.Less(t, elapsed, 180*time.Millisecond, 
"预热读取应受父 context 取消控制,不应阻塞到 read_timeout") +} + func TestOpenAIGatewayService_Forward_WSv2_TurnMetadataInPayloadOnConnReuse(t *testing.T) { gin.SetMode(gin.TestMode) @@ -674,6 +803,289 @@ func TestOpenAIGatewayService_Forward_WSv2_TurnMetadataInPayloadOnConnReuse(t *t require.Equal(t, "turn_meta_payload_2", gjson.Get(secondWrite, "client_metadata.x-codex-turn-metadata").String()) } +func TestOpenAIGatewayService_Forward_WSv2StoreFalseSessionConnIsolation(t *testing.T) { + gin.SetMode(gin.TestMode) + + var upgradeCount atomic.Int64 + var sequence atomic.Int64 + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + upgradeCount.Add(1) + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + t.Errorf("upgrade websocket failed: %v", err) + return + } + defer func() { + _ = conn.Close() + }() + + for { + var request map[string]any + if err := conn.ReadJSON(&request); err != nil { + return + } + responseID := "resp_store_false_" + strconv.FormatInt(sequence.Add(1), 10) + if err := conn.WriteJSON(map[string]any{ + "type": "response.completed", + "response": map[string]any{ + "id": responseID, + "model": "gpt-5.1", + "usage": map[string]any{ + "input_tokens": 1, + "output_tokens": 1, + }, + }, + }); err != nil { + return + } + } + })) + defer wsServer.Close() + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 4 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 4 + cfg.Gateway.OpenAIWS.StoreDisabledForceNewConn = true + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + 
cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + + account := &Account{ + ID: 79, + Name: "openai-store-false", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 2, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"store":false,"input":[{"type":"input_text","text":"hello"}]}`) + + rec1 := httptest.NewRecorder() + c1, _ := gin.CreateTestContext(rec1) + c1.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c1.Request.Header.Set("session_id", "session_store_false_a") + result1, err := svc.Forward(context.Background(), c1, account, body) + require.NoError(t, err) + require.NotNil(t, result1) + require.Equal(t, int64(1), upgradeCount.Load()) + + rec2 := httptest.NewRecorder() + c2, _ := gin.CreateTestContext(rec2) + c2.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c2.Request.Header.Set("session_id", "session_store_false_a") + result2, err := svc.Forward(context.Background(), c2, account, body) + require.NoError(t, err) + require.NotNil(t, result2) + require.Equal(t, int64(1), upgradeCount.Load(), "同一 session(store=false) 应复用同一 WS 连接") + + rec3 := httptest.NewRecorder() + c3, _ := gin.CreateTestContext(rec3) + c3.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c3.Request.Header.Set("session_id", "session_store_false_b") + result3, err := svc.Forward(context.Background(), c3, account, body) + require.NoError(t, err) + require.NotNil(t, result3) + require.Equal(t, int64(2), upgradeCount.Load(), "不同 session(store=false) 应隔离连接,避免续链状态互相覆盖") +} + +func TestOpenAIGatewayService_Forward_WSv2StoreFalseDisableForceNewConnAllowsReuse(t *testing.T) { + gin.SetMode(gin.TestMode) + + var 
upgradeCount atomic.Int64 + var sequence atomic.Int64 + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + upgradeCount.Add(1) + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + t.Errorf("upgrade websocket failed: %v", err) + return + } + defer func() { + _ = conn.Close() + }() + + for { + var request map[string]any + if err := conn.ReadJSON(&request); err != nil { + return + } + responseID := "resp_store_false_reuse_" + strconv.FormatInt(sequence.Add(1), 10) + if err := conn.WriteJSON(map[string]any{ + "type": "response.completed", + "response": map[string]any{ + "id": responseID, + "model": "gpt-5.1", + "usage": map[string]any{ + "input_tokens": 1, + "output_tokens": 1, + }, + }, + }); err != nil { + return + } + } + })) + defer wsServer.Close() + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + cfg.Gateway.OpenAIWS.StoreDisabledForceNewConn = false + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + + account := &Account{ + ID: 80, + Name: "openai-store-false-reuse", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 2, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := 
[]byte(`{"model":"gpt-5.1","stream":false,"store":false,"input":[{"type":"input_text","text":"hello"}]}`) + + rec1 := httptest.NewRecorder() + c1, _ := gin.CreateTestContext(rec1) + c1.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c1.Request.Header.Set("session_id", "session_store_false_reuse_a") + result1, err := svc.Forward(context.Background(), c1, account, body) + require.NoError(t, err) + require.NotNil(t, result1) + require.Equal(t, int64(1), upgradeCount.Load()) + + rec2 := httptest.NewRecorder() + c2, _ := gin.CreateTestContext(rec2) + c2.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c2.Request.Header.Set("session_id", "session_store_false_reuse_b") + result2, err := svc.Forward(context.Background(), c2, account, body) + require.NoError(t, err) + require.NotNil(t, result2) + require.Equal(t, int64(1), upgradeCount.Load(), "关闭强制新连后,不同 session(store=false) 可复用连接") +} + +func TestOpenAIGatewayService_Forward_WSv2ReadTimeoutAppliesPerRead(t *testing.T) { + gin.SetMode(gin.TestMode) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + cfg.Gateway.OpenAIWS.QueueLimitPerConn = 8 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.ReadTimeoutSeconds = 1 + cfg.Gateway.OpenAIWS.WriteTimeoutSeconds = 3 + + captureConn := &openAIWSCaptureConn{ + readDelays: []time.Duration{ + 700 * time.Millisecond, + 
700 * time.Millisecond, + }, + events: [][]byte{ + []byte(`{"type":"response.created","response":{"id":"resp_timeout_ok","model":"gpt-5.1"}}`), + []byte(`{"type":"response.completed","response":{"id":"resp_timeout_ok","model":"gpt-5.1","usage":{"input_tokens":2,"output_tokens":1}}}`), + }, + } + captureDialer := &openAIWSCaptureDialer{conn: captureConn} + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(captureDialer) + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader(`{"id":"resp_http_fallback","usage":{"input_tokens":1,"output_tokens":1}}`)), + }, + } + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + openaiWSPool: pool, + } + + account := &Account{ + ID: 81, + Name: "openai-read-timeout", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.1","stream":false,"input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, "resp_timeout_ok", result.RequestID) + require.Nil(t, upstream.lastReq, "每次 Read 都应独立应用超时;总时长超过 read_timeout 不应误回退 HTTP") +} + type openAIWSCaptureDialer struct { mu sync.Mutex conn *openAIWSCaptureConn @@ -706,11 +1118,12 @@ func (d *openAIWSCaptureDialer) DialCount() int { } type openAIWSCaptureConn struct { - mu sync.Mutex - events [][]byte - lastWrite map[string]any - writes []map[string]any - closed bool + mu sync.Mutex + readDelays []time.Duration + events [][]byte + 
lastWrite map[string]any + writes []map[string]any + closed bool } func (c *openAIWSCaptureConn) WriteJSON(ctx context.Context, value any) error { @@ -728,17 +1141,35 @@ func (c *openAIWSCaptureConn) WriteJSON(ctx context.Context, value any) error { } func (c *openAIWSCaptureConn) ReadMessage(ctx context.Context) ([]byte, error) { - _ = ctx + if ctx == nil { + ctx = context.Background() + } c.mu.Lock() - defer c.mu.Unlock() if c.closed { + c.mu.Unlock() return nil, errOpenAIWSConnClosed } if len(c.events) == 0 { + c.mu.Unlock() return nil, io.EOF } + delay := time.Duration(0) + if len(c.readDelays) > 0 { + delay = c.readDelays[0] + c.readDelays = c.readDelays[1:] + } event := c.events[0] c.events = c.events[1:] + c.mu.Unlock() + if delay > 0 { + timer := time.NewTimer(delay) + defer timer.Stop() + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-timer.C: + } + } return event, nil } diff --git a/backend/internal/service/openai_ws_pool.go b/backend/internal/service/openai_ws_pool.go index 342fee1bd..04a91ba1c 100644 --- a/backend/internal/service/openai_ws_pool.go +++ b/backend/internal/service/openai_ws_pool.go @@ -17,9 +17,12 @@ import ( const ( openAIWSConnMaxAge = 60 * time.Minute - openAIWSConnHealthCheckIdle = 30 * time.Second + openAIWSConnHealthCheckIdle = 90 * time.Second openAIWSConnHealthCheckTO = 2 * time.Second openAIWSConnPrewarmExtraDelay = 2 * time.Second + + openAIWSPrewarmFailureWindow = 30 * time.Second + openAIWSPrewarmFailureSuppress = 2 ) var ( @@ -55,6 +58,8 @@ type openAIWSAcquireRequest struct { Headers http.Header ProxyURL string PreferredConnID string + // ForceNewConn: 强制本次获取新连接(避免复用导致连接内续链状态互相污染)。 + ForceNewConn bool } type openAIWSConnLease struct { @@ -62,6 +67,7 @@ type openAIWSConnLease struct { accountID int64 conn *openAIWSConn queueWait time.Duration + connPick time.Duration reused bool released atomic.Bool } @@ -80,6 +86,13 @@ func (l *openAIWSConnLease) QueueWaitDuration() time.Duration { return l.queueWait } +func 
(l *openAIWSConnLease) ConnPickDuration() time.Duration { + if l == nil { + return 0 + } + return l.connPick +} + func (l *openAIWSConnLease) Reused() bool { if l == nil { return false @@ -115,6 +128,13 @@ func (l *openAIWSConnLease) WriteJSON(value any, timeout time.Duration) error { return l.conn.writeJSONWithTimeout(value, timeout) } +func (l *openAIWSConnLease) WriteJSONContext(ctx context.Context, value any) error { + if l == nil || l.conn == nil { + return errOpenAIWSConnClosed + } + return l.conn.writeJSON(value, ctx) +} + func (l *openAIWSConnLease) ReadMessage(timeout time.Duration) ([]byte, error) { if l == nil || l.conn == nil { return nil, errOpenAIWSConnClosed @@ -122,6 +142,20 @@ func (l *openAIWSConnLease) ReadMessage(timeout time.Duration) ([]byte, error) { return l.conn.readMessageWithTimeout(timeout) } +func (l *openAIWSConnLease) ReadMessageContext(ctx context.Context) ([]byte, error) { + if l == nil || l.conn == nil { + return nil, errOpenAIWSConnClosed + } + return l.conn.readMessage(ctx) +} + +func (l *openAIWSConnLease) ReadMessageWithContextTimeout(ctx context.Context, timeout time.Duration) ([]byte, error) { + if l == nil || l.conn == nil { + return nil, errOpenAIWSConnClosed + } + return l.conn.readMessageWithContextTimeout(ctx, timeout) +} + func (l *openAIWSConnLease) MarkBroken() { if l == nil || l.pool == nil || l.conn == nil { return @@ -254,16 +288,24 @@ func (c *openAIWSConn) writeJSONWithTimeout(value any, timeout time.Duration) er default: } + writeCtx := context.Background() + if timeout <= 0 { + return c.writeJSON(value, writeCtx) + } + var cancel context.CancelFunc + writeCtx, cancel = context.WithTimeout(writeCtx, timeout) + defer cancel() + return c.writeJSON(value, writeCtx) +} + +func (c *openAIWSConn) writeJSON(value any, writeCtx context.Context) error { c.ioMu.Lock() defer c.ioMu.Unlock() if c.ws == nil { return errOpenAIWSConnClosed } - writeCtx := context.Background() - if timeout > 0 { - var cancel context.CancelFunc 
- writeCtx, cancel = context.WithTimeout(writeCtx, timeout) - defer cancel() + if writeCtx == nil { + writeCtx = context.Background() } if err := c.ws.WriteJSON(writeCtx, value); err != nil { return err @@ -273,6 +315,10 @@ func (c *openAIWSConn) writeJSONWithTimeout(value any, timeout time.Duration) er } func (c *openAIWSConn) readMessageWithTimeout(timeout time.Duration) ([]byte, error) { + return c.readMessageWithContextTimeout(context.Background(), timeout) +} + +func (c *openAIWSConn) readMessageWithContextTimeout(parent context.Context, timeout time.Duration) ([]byte, error) { if c == nil { return nil, errOpenAIWSConnClosed } @@ -282,16 +328,25 @@ func (c *openAIWSConn) readMessageWithTimeout(timeout time.Duration) ([]byte, er default: } + if parent == nil { + parent = context.Background() + } + if timeout <= 0 { + return c.readMessage(parent) + } + readCtx, cancel := context.WithTimeout(parent, timeout) + defer cancel() + return c.readMessage(readCtx) +} + +func (c *openAIWSConn) readMessage(readCtx context.Context) ([]byte, error) { c.ioMu.Lock() defer c.ioMu.Unlock() if c.ws == nil { return nil, errOpenAIWSConnClosed } - readCtx := context.Background() - if timeout > 0 { - var cancel context.CancelFunc - readCtx, cancel = context.WithTimeout(readCtx, timeout) - defer cancel() + if readCtx == nil { + readCtx = context.Background() } payload, err := c.ws.ReadMessage(readCtx) if err != nil { @@ -407,10 +462,14 @@ func (c *openAIWSConn) markPrewarmed() { } type openAIWSAccountPool struct { + mu sync.Mutex conns map[string]*openAIWSConn creating int lastAcquire *openAIWSAcquireRequest prewarmActive bool + prewarmUntil time.Time + prewarmFails int + prewarmFailAt time.Time } type OpenAIWSPoolMetricsSnapshot struct { @@ -419,6 +478,8 @@ type OpenAIWSPoolMetricsSnapshot struct { AcquireCreateTotal int64 AcquireQueueWaitTotal int64 AcquireQueueWaitMsTotal int64 + ConnPickTotal int64 + ConnPickMsTotal int64 ScaleUpTotal int64 ScaleDownTotal int64 } @@ -429,6 +490,8 
@@ type openAIWSPoolMetrics struct { acquireCreateTotal atomic.Int64 acquireQueueWaitTotal atomic.Int64 acquireQueueWaitMs atomic.Int64 + connPickTotal atomic.Int64 + connPickMs atomic.Int64 scaleUpTotal atomic.Int64 scaleDownTotal atomic.Int64 } @@ -438,7 +501,7 @@ type openAIWSConnPool struct { // 通过接口解耦底层 WS 客户端实现,默认使用 coder/websocket。 clientDialer openAIWSClientDialer - mu sync.Mutex + mu sync.RWMutex accounts map[int64]*openAIWSAccountPool seq atomic.Uint64 @@ -463,11 +526,23 @@ func (p *openAIWSConnPool) SnapshotMetrics() OpenAIWSPoolMetricsSnapshot { AcquireCreateTotal: p.metrics.acquireCreateTotal.Load(), AcquireQueueWaitTotal: p.metrics.acquireQueueWaitTotal.Load(), AcquireQueueWaitMsTotal: p.metrics.acquireQueueWaitMs.Load(), + ConnPickTotal: p.metrics.connPickTotal.Load(), + ConnPickMsTotal: p.metrics.connPickMs.Load(), ScaleUpTotal: p.metrics.scaleUpTotal.Load(), ScaleDownTotal: p.metrics.scaleDownTotal.Load(), } } +func (p *openAIWSConnPool) SnapshotTransportMetrics() OpenAIWSTransportMetricsSnapshot { + if p == nil { + return OpenAIWSTransportMetricsSnapshot{} + } + if dialer, ok := p.clientDialer.(openAIWSTransportMetricsDialer); ok { + return dialer.SnapshotTransportMetrics() + } + return OpenAIWSTransportMetricsSnapshot{} +} + func (p *openAIWSConnPool) setClientDialerForTest(dialer openAIWSClientDialer) { if p == nil || dialer == nil { return @@ -493,70 +568,115 @@ func (p *openAIWSConnPool) acquire(ctx context.Context, req openAIWSAcquireReque accountID := req.Account.ID effectiveMaxConns := p.effectiveMaxConnsByAccount(req.Account) var evicted []*openAIWSConn - - p.mu.Lock() - ap := p.ensureAccountPoolLocked(accountID) + ap := p.getOrCreateAccountPool(accountID) + ap.mu.Lock() ap.lastAcquire = cloneOpenAIWSAcquireRequestPtr(&req) evicted = p.cleanupAccountLocked(ap, time.Now(), effectiveMaxConns) - - if preferred := stringsTrim(req.PreferredConnID); preferred != "" { - if conn, ok := ap.conns[preferred]; ok && conn.tryAcquire() { - p.mu.Unlock() 
- closeOpenAIWSConns(evicted) - if p.shouldHealthCheckConn(conn) { - if err := conn.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { - conn.close() - p.evictConn(accountID, conn.id) - if retry < 1 { - return p.acquire(ctx, req, retry+1) + pickStartedAt := time.Now() + allowReuse := !req.ForceNewConn + + if allowReuse { + if preferred := stringsTrim(req.PreferredConnID); preferred != "" { + if conn, ok := ap.conns[preferred]; ok && conn.tryAcquire() { + connPick := time.Since(pickStartedAt) + p.recordConnPickDuration(connPick) + ap.mu.Unlock() + closeOpenAIWSConns(evicted) + if p.shouldHealthCheckConn(conn) { + if err := conn.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { + conn.close() + p.evictConn(accountID, conn.id) + if retry < 1 { + return p.acquire(ctx, req, retry+1) + } + return nil, err } - return nil, err } + lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: conn, connPick: connPick, reused: true} + p.metrics.acquireReuseTotal.Add(1) + p.ensureTargetIdleAsync(accountID) + return lease, nil } - lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: conn, reused: true} - p.metrics.acquireReuseTotal.Add(1) - p.ensureTargetIdleAsync(accountID) - return lease, nil } - } - for _, conn := range p.sortedConnsByLoadLocked(ap) { - if conn.tryAcquire() { - p.mu.Unlock() + best := p.pickLeastBusyConnLocked(ap, "") + if best != nil && best.tryAcquire() { + connPick := time.Since(pickStartedAt) + p.recordConnPickDuration(connPick) + ap.mu.Unlock() closeOpenAIWSConns(evicted) - if p.shouldHealthCheckConn(conn) { - if err := conn.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { - conn.close() - p.evictConn(accountID, conn.id) + if p.shouldHealthCheckConn(best) { + if err := best.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { + best.close() + p.evictConn(accountID, best.id) if retry < 1 { return p.acquire(ctx, req, retry+1) } return nil, err } } - lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: 
conn, reused: true} + lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: best, connPick: connPick, reused: true} p.metrics.acquireReuseTotal.Add(1) p.ensureTargetIdleAsync(accountID) return lease, nil } + for _, conn := range ap.conns { + if conn == nil || conn == best { + continue + } + if conn.tryAcquire() { + connPick := time.Since(pickStartedAt) + p.recordConnPickDuration(connPick) + ap.mu.Unlock() + closeOpenAIWSConns(evicted) + if p.shouldHealthCheckConn(conn) { + if err := conn.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { + conn.close() + p.evictConn(accountID, conn.id) + if retry < 1 { + return p.acquire(ctx, req, retry+1) + } + return nil, err + } + } + lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: conn, connPick: connPick, reused: true} + p.metrics.acquireReuseTotal.Add(1) + p.ensureTargetIdleAsync(accountID) + return lease, nil + } + } + } + + if req.ForceNewConn && len(ap.conns)+ap.creating >= effectiveMaxConns { + if idle := p.pickOldestIdleConnLocked(ap); idle != nil { + delete(ap.conns, idle.id) + evicted = append(evicted, idle) + p.metrics.scaleDownTotal.Add(1) + } } if len(ap.conns)+ap.creating < effectiveMaxConns { + connPick := time.Since(pickStartedAt) + p.recordConnPickDuration(connPick) ap.creating++ - p.mu.Unlock() + ap.mu.Unlock() closeOpenAIWSConns(evicted) conn, dialErr := p.dialConn(ctx, req) - p.mu.Lock() - ap = p.ensureAccountPoolLocked(accountID) + ap = p.getOrCreateAccountPool(accountID) + ap.mu.Lock() ap.creating-- if dialErr != nil { - p.mu.Unlock() + ap.prewarmFails++ + ap.prewarmFailAt = time.Now() + ap.mu.Unlock() return nil, dialErr } ap.conns[conn.id] = conn - p.mu.Unlock() + ap.prewarmFails = 0 + ap.prewarmFailAt = time.Time{} + ap.mu.Unlock() p.metrics.acquireCreateTotal.Add(1) if !conn.tryAcquire() { @@ -566,24 +686,33 @@ func (p *openAIWSConnPool) acquire(ctx context.Context, req openAIWSAcquireReque return nil, err } } - lease := &openAIWSConnLease{pool: p, accountID: accountID, 
conn: conn} + lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: conn, connPick: connPick} p.ensureTargetIdleAsync(accountID) return lease, nil } + if req.ForceNewConn { + p.recordConnPickDuration(time.Since(pickStartedAt)) + ap.mu.Unlock() + closeOpenAIWSConns(evicted) + return nil, errOpenAIWSConnQueueFull + } + target := p.pickLeastBusyConnLocked(ap, req.PreferredConnID) + connPick := time.Since(pickStartedAt) + p.recordConnPickDuration(connPick) if target == nil { - p.mu.Unlock() + ap.mu.Unlock() closeOpenAIWSConns(evicted) return nil, errOpenAIWSConnClosed } if int(target.waiters.Load()) >= p.queueLimitPerConn() { - p.mu.Unlock() + ap.mu.Unlock() closeOpenAIWSConns(evicted) return nil, errOpenAIWSConnQueueFull } target.waiters.Add(1) - p.mu.Unlock() + ap.mu.Unlock() closeOpenAIWSConns(evicted) defer target.waiters.Add(-1) waitStart := time.Now() @@ -609,15 +738,61 @@ func (p *openAIWSConnPool) acquire(ctx context.Context, req openAIWSAcquireReque queueWait := time.Since(waitStart) p.metrics.acquireQueueWaitMs.Add(queueWait.Milliseconds()) - lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: target, queueWait: queueWait, reused: true} + lease := &openAIWSConnLease{pool: p, accountID: accountID, conn: target, queueWait: queueWait, connPick: connPick, reused: true} p.metrics.acquireReuseTotal.Add(1) p.ensureTargetIdleAsync(accountID) return lease, nil } +func (p *openAIWSConnPool) recordConnPickDuration(duration time.Duration) { + if p == nil { + return + } + if duration < 0 { + duration = 0 + } + p.metrics.connPickTotal.Add(1) + p.metrics.connPickMs.Add(duration.Milliseconds()) +} + +func (p *openAIWSConnPool) pickOldestIdleConnLocked(ap *openAIWSAccountPool) *openAIWSConn { + if ap == nil || len(ap.conns) == 0 { + return nil + } + var oldest *openAIWSConn + for _, conn := range ap.conns { + if conn == nil || conn.isLeased() || conn.waiters.Load() > 0 { + continue + } + if oldest == nil || conn.lastUsedAt().Before(oldest.lastUsedAt()) 
{ + oldest = conn + } + } + return oldest +} + +func (p *openAIWSConnPool) getOrCreateAccountPool(accountID int64) *openAIWSAccountPool { + if p == nil || accountID <= 0 { + return nil + } + p.mu.Lock() + defer p.mu.Unlock() + ap, ok := p.accounts[accountID] + if ok && ap != nil { + return ap + } + ap = &openAIWSAccountPool{conns: make(map[string]*openAIWSConn)} + p.accounts[accountID] = ap + return ap +} + +// ensureAccountPoolLocked 兼容旧调用:调用方需已持有 p.mu 锁。 func (p *openAIWSConnPool) ensureAccountPoolLocked(accountID int64) *openAIWSAccountPool { + if p == nil || accountID <= 0 { + return nil + } ap, ok := p.accounts[accountID] - if ok { + if ok && ap != nil { return ap } ap = &openAIWSAccountPool{conns: make(map[string]*openAIWSConn)} @@ -625,6 +800,16 @@ func (p *openAIWSConnPool) ensureAccountPoolLocked(accountID int64) *openAIWSAcc return ap } +func (p *openAIWSConnPool) getAccountPool(accountID int64) (*openAIWSAccountPool, bool) { + if p == nil || accountID <= 0 { + return nil, false + } + p.mu.RLock() + defer p.mu.RUnlock() + ap, ok := p.accounts[accountID] + return ap, ok && ap != nil +} + func (p *openAIWSConnPool) cleanupAccountLocked(ap *openAIWSAccountPool, now time.Time, maxConns int) []*openAIWSConn { if ap == nil { return nil @@ -656,7 +841,8 @@ func (p *openAIWSConnPool) cleanupAccountLocked(ap *openAIWSAccountPool, now tim if maxIdle >= 0 && len(ap.conns) > maxIdle { idleConns := make([]*openAIWSConn, 0, len(ap.conns)) for _, conn := range ap.conns { - if conn.isLeased() { + // 有等待者的连接不能在清理阶段被淘汰,否则等待中的 acquire 会收到 closed 错误。 + if conn.isLeased() || conn.waiters.Load() > 0 { continue } idleConns = append(idleConns, conn) @@ -681,25 +867,6 @@ func (p *openAIWSConnPool) cleanupAccountLocked(ap *openAIWSAccountPool, now tim return evicted } -func (p *openAIWSConnPool) sortedConnsByLoadLocked(ap *openAIWSAccountPool) []*openAIWSConn { - if ap == nil || len(ap.conns) == 0 { - return nil - } - conns := make([]*openAIWSConn, 0, len(ap.conns)) - for _, conn 
:= range ap.conns { - conns = append(conns, conn) - } - sort.SliceStable(conns, func(i, j int) bool { - wi := conns[i].waiters.Load() - wj := conns[j].waiters.Load() - if wi != wj { - return wi < wj - } - return conns[i].lastUsedAt().Before(conns[j].lastUsedAt()) - }) - return conns -} - func (p *openAIWSConnPool) pickLeastBusyConnLocked(ap *openAIWSAccountPool, preferredConnID string) *openAIWSConn { if ap == nil || len(ap.conns) == 0 { return nil @@ -710,11 +877,24 @@ func (p *openAIWSConnPool) pickLeastBusyConnLocked(ap *openAIWSAccountPool, pref return conn } } - conns := p.sortedConnsByLoadLocked(ap) - if len(conns) == 0 { - return nil + var best *openAIWSConn + var bestWaiters int32 + var bestLastUsed time.Time + for _, conn := range ap.conns { + if conn == nil { + continue + } + waiters := conn.waiters.Load() + lastUsed := conn.lastUsedAt() + if best == nil || + waiters < bestWaiters || + (waiters == bestWaiters && lastUsed.Before(bestLastUsed)) { + best = conn + bestWaiters = waiters + bestLastUsed = lastUsed + } } - return conns[0] + return best } func accountPoolLoadLocked(ap *openAIWSAccountPool) (inflight int, waiters int) { @@ -738,12 +918,12 @@ func (p *openAIWSConnPool) AccountPoolLoad(accountID int64) (inflight int, waite if p == nil || accountID <= 0 { return 0, 0, 0 } - p.mu.Lock() - defer p.mu.Unlock() - ap := p.accounts[accountID] - if ap == nil { + ap, ok := p.getAccountPool(accountID) + if !ok || ap == nil { return 0, 0, 0 } + ap.mu.Lock() + defer ap.mu.Unlock() inflight, waiters = accountPoolLoadLocked(ap) return inflight, waiters, len(ap.conns) } @@ -755,14 +935,23 @@ func (p *openAIWSConnPool) ensureTargetIdleAsync(accountID int64) { var req openAIWSAcquireRequest need := 0 - p.mu.Lock() - ap, ok := p.accounts[accountID] - if !ok || ap == nil || ap.lastAcquire == nil { - p.mu.Unlock() + ap, ok := p.getAccountPool(accountID) + if !ok || ap == nil { + return + } + ap.mu.Lock() + defer ap.mu.Unlock() + if ap.lastAcquire == nil { return } if 
ap.prewarmActive { - p.mu.Unlock() + return + } + now := time.Now() + if !ap.prewarmUntil.IsZero() && now.Before(ap.prewarmUntil) { + return + } + if p.shouldSuppressPrewarmLocked(ap, now) { return } effectiveMaxConns := p.maxConnsHardCap() @@ -772,19 +961,19 @@ func (p *openAIWSConnPool) ensureTargetIdleAsync(accountID int64) { target := p.targetConnCountLocked(ap, effectiveMaxConns) current := len(ap.conns) + ap.creating if current >= target { - p.mu.Unlock() return } need = target - current if need <= 0 { - p.mu.Unlock() return } req = cloneOpenAIWSAcquireRequest(*ap.lastAcquire) ap.prewarmActive = true + if cooldown := p.prewarmCooldown(); cooldown > 0 { + ap.prewarmUntil = now.Add(cooldown) + } ap.creating += need p.metrics.scaleUpTotal.Add(int64(need)) - p.mu.Unlock() go p.prewarmConns(accountID, req, need) } @@ -831,11 +1020,11 @@ func (p *openAIWSConnPool) targetConnCountLocked(ap *openAIWSAccountPool, maxCon func (p *openAIWSConnPool) prewarmConns(accountID int64, req openAIWSAcquireRequest, total int) { defer func() { - p.mu.Lock() - if ap, ok := p.accounts[accountID]; ok && ap != nil { + if ap, ok := p.getAccountPool(accountID); ok && ap != nil { + ap.mu.Lock() ap.prewarmActive = false + ap.mu.Unlock() } - p.mu.Unlock() }() for i := 0; i < total; i++ { @@ -843,29 +1032,32 @@ func (p *openAIWSConnPool) prewarmConns(accountID int64, req openAIWSAcquireRequ conn, err := p.dialConn(ctx, req) cancel() - p.mu.Lock() - ap, ok := p.accounts[accountID] + ap, ok := p.getAccountPool(accountID) if !ok || ap == nil { - p.mu.Unlock() if conn != nil { conn.close() } return } + ap.mu.Lock() if ap.creating > 0 { ap.creating-- } if err != nil { - p.mu.Unlock() + ap.prewarmFails++ + ap.prewarmFailAt = time.Now() + ap.mu.Unlock() continue } if len(ap.conns) >= p.effectiveMaxConnsByAccount(req.Account) { - p.mu.Unlock() + ap.mu.Unlock() conn.close() continue } ap.conns[conn.id] = conn - p.mu.Unlock() + ap.prewarmFails = 0 + ap.prewarmFailAt = time.Time{} + ap.mu.Unlock() } } 
@@ -874,14 +1066,15 @@ func (p *openAIWSConnPool) evictConn(accountID int64, connID string) { return } var conn *openAIWSConn - p.mu.Lock() - if ap, ok := p.accounts[accountID]; ok { + ap, ok := p.getAccountPool(accountID) + if ok && ap != nil { + ap.mu.Lock() if c, exists := ap.conns[connID]; exists { conn = c delete(ap.conns, connID) } + ap.mu.Unlock() } - p.mu.Unlock() if conn != nil { conn.close() } @@ -1006,6 +1199,32 @@ func (p *openAIWSConnPool) targetUtilization() float64 { return 0.7 } +func (p *openAIWSConnPool) prewarmCooldown() time.Duration { + if p != nil && p.cfg != nil && p.cfg.Gateway.OpenAIWS.PrewarmCooldownMS > 0 { + return time.Duration(p.cfg.Gateway.OpenAIWS.PrewarmCooldownMS) * time.Millisecond + } + return 0 +} + +func (p *openAIWSConnPool) shouldSuppressPrewarmLocked(ap *openAIWSAccountPool, now time.Time) bool { + if ap == nil { + return true + } + if ap.prewarmFails <= 0 { + return false + } + if ap.prewarmFailAt.IsZero() { + ap.prewarmFails = 0 + return false + } + if now.Sub(ap.prewarmFailAt) > openAIWSPrewarmFailureWindow { + ap.prewarmFails = 0 + ap.prewarmFailAt = time.Time{} + return false + } + return ap.prewarmFails >= openAIWSPrewarmFailureSuppress +} + func (p *openAIWSConnPool) dialTimeout() time.Duration { if p != nil && p.cfg != nil && p.cfg.Gateway.OpenAIWS.DialTimeoutSeconds > 0 { return time.Duration(p.cfg.Gateway.OpenAIWS.DialTimeoutSeconds) * time.Second diff --git a/backend/internal/service/openai_ws_pool_benchmark_test.go b/backend/internal/service/openai_ws_pool_benchmark_test.go new file mode 100644 index 000000000..bff74b626 --- /dev/null +++ b/backend/internal/service/openai_ws_pool_benchmark_test.go @@ -0,0 +1,58 @@ +package service + +import ( + "context" + "errors" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" +) + +func BenchmarkOpenAIWSPoolAcquire(b *testing.B) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 8 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 1 + 
cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 4 + cfg.Gateway.OpenAIWS.QueueLimitPerConn = 256 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 1 + + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(&openAIWSCountingDialer{}) + + account := &Account{ID: 1001, Platform: PlatformOpenAI, Type: AccountTypeAPIKey} + req := openAIWSAcquireRequest{ + Account: account, + WSURL: "wss://example.com/v1/responses", + } + ctx := context.Background() + + lease, err := pool.Acquire(ctx, req) + if err != nil { + b.Fatalf("warm acquire failed: %v", err) + } + lease.Release() + + b.ReportAllocs() + b.ResetTimer() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var ( + got *openAIWSConnLease + acquireErr error + ) + for retry := 0; retry < 3; retry++ { + got, acquireErr = pool.Acquire(ctx, req) + if acquireErr == nil { + break + } + if !errors.Is(acquireErr, errOpenAIWSConnClosed) { + break + } + } + if acquireErr != nil { + b.Fatalf("acquire failed: %v", acquireErr) + } + got.Release() + } + }) +} diff --git a/backend/internal/service/openai_ws_pool_test.go b/backend/internal/service/openai_ws_pool_test.go index 3f9e84d5e..73867f14d 100644 --- a/backend/internal/service/openai_ws_pool_test.go +++ b/backend/internal/service/openai_ws_pool_test.go @@ -18,7 +18,7 @@ func TestOpenAIWSConnPool_CleanupStaleAndTrimIdle(t *testing.T) { pool := newOpenAIWSConnPool(cfg) accountID := int64(10) - ap := pool.ensureAccountPoolLocked(accountID) + ap := pool.getOrCreateAccountPool(accountID) stale := newOpenAIWSConn("stale", accountID, nil, nil) stale.createdAtNano.Store(time.Now().Add(-2 * time.Hour).UnixNano()) @@ -49,7 +49,7 @@ func TestOpenAIWSConnPool_TargetConnCountAdaptive(t *testing.T) { cfg.Gateway.OpenAIWS.PoolTargetUtilization = 0.5 pool := newOpenAIWSConnPool(cfg) - ap := pool.ensureAccountPoolLocked(88) + ap := pool.getOrCreateAccountPool(88) conn1 := newOpenAIWSConn("c1", 88, nil, nil) conn2 := newOpenAIWSConn("c2", 88, nil, nil) @@ -79,7 +79,7 @@ func 
TestOpenAIWSConnPool_TargetConnCountMinIdleZero(t *testing.T) { cfg.Gateway.OpenAIWS.PoolTargetUtilization = 0.8 pool := newOpenAIWSConnPool(cfg) - ap := pool.ensureAccountPoolLocked(66) + ap := pool.getOrCreateAccountPool(66) target := pool.targetConnCountLocked(ap, pool.maxConnsHardCap()) require.Equal(t, 0, target, "min_idle=0 且无负载时应允许缩容到 0") @@ -97,26 +97,138 @@ func TestOpenAIWSConnPool_EnsureTargetIdleAsync(t *testing.T) { accountID := int64(77) account := &Account{ID: accountID, Platform: PlatformOpenAI, Type: AccountTypeAPIKey} - pool.mu.Lock() - ap := pool.ensureAccountPoolLocked(accountID) + ap := pool.getOrCreateAccountPool(accountID) + ap.mu.Lock() ap.lastAcquire = &openAIWSAcquireRequest{ Account: account, WSURL: "wss://example.com/v1/responses", } - pool.mu.Unlock() + ap.mu.Unlock() pool.ensureTargetIdleAsync(accountID) require.Eventually(t, func() bool { - pool.mu.Lock() - defer pool.mu.Unlock() - return len(pool.accounts[accountID].conns) >= 2 + ap, ok := pool.getAccountPool(accountID) + if !ok || ap == nil { + return false + } + ap.mu.Lock() + defer ap.mu.Unlock() + return len(ap.conns) >= 2 }, 2*time.Second, 20*time.Millisecond) metrics := pool.SnapshotMetrics() require.GreaterOrEqual(t, metrics.ScaleUpTotal, int64(2)) } +func TestOpenAIWSConnPool_EnsureTargetIdleAsyncCooldown(t *testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 4 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 2 + cfg.Gateway.OpenAIWS.PoolTargetUtilization = 0.8 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 1 + cfg.Gateway.OpenAIWS.PrewarmCooldownMS = 500 + + pool := newOpenAIWSConnPool(cfg) + dialer := &openAIWSCountingDialer{} + pool.setClientDialerForTest(dialer) + + accountID := int64(178) + account := &Account{ID: accountID, Platform: PlatformOpenAI, Type: AccountTypeAPIKey} + ap := pool.getOrCreateAccountPool(accountID) + ap.mu.Lock() + ap.lastAcquire = &openAIWSAcquireRequest{ + Account: account, + WSURL: "wss://example.com/v1/responses", + } + 
ap.mu.Unlock() + + pool.ensureTargetIdleAsync(accountID) + require.Eventually(t, func() bool { + ap, ok := pool.getAccountPool(accountID) + if !ok || ap == nil { + return false + } + ap.mu.Lock() + defer ap.mu.Unlock() + return len(ap.conns) >= 2 && !ap.prewarmActive + }, 2*time.Second, 20*time.Millisecond) + firstDialCount := dialer.DialCount() + require.GreaterOrEqual(t, firstDialCount, 2) + + // 人工制造缺口触发新一轮预热需求。 + ap, ok := pool.getAccountPool(accountID) + require.True(t, ok) + require.NotNil(t, ap) + ap.mu.Lock() + for id := range ap.conns { + delete(ap.conns, id) + break + } + ap.mu.Unlock() + + pool.ensureTargetIdleAsync(accountID) + time.Sleep(120 * time.Millisecond) + require.Equal(t, firstDialCount, dialer.DialCount(), "cooldown 窗口内不应再次触发预热") + + time.Sleep(450 * time.Millisecond) + pool.ensureTargetIdleAsync(accountID) + require.Eventually(t, func() bool { + return dialer.DialCount() > firstDialCount + }, 2*time.Second, 20*time.Millisecond) +} + +func TestOpenAIWSConnPool_EnsureTargetIdleAsyncFailureSuppress(t *testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 2 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 1 + cfg.Gateway.OpenAIWS.PoolTargetUtilization = 0.8 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 1 + cfg.Gateway.OpenAIWS.PrewarmCooldownMS = 0 + + pool := newOpenAIWSConnPool(cfg) + dialer := &openAIWSAlwaysFailDialer{} + pool.setClientDialerForTest(dialer) + + accountID := int64(279) + account := &Account{ID: accountID, Platform: PlatformOpenAI, Type: AccountTypeAPIKey} + ap := pool.getOrCreateAccountPool(accountID) + ap.mu.Lock() + ap.lastAcquire = &openAIWSAcquireRequest{ + Account: account, + WSURL: "wss://example.com/v1/responses", + } + ap.mu.Unlock() + + pool.ensureTargetIdleAsync(accountID) + require.Eventually(t, func() bool { + ap, ok := pool.getAccountPool(accountID) + if !ok || ap == nil { + return false + } + ap.mu.Lock() + defer ap.mu.Unlock() + return !ap.prewarmActive + }, 2*time.Second, 
20*time.Millisecond) + + pool.ensureTargetIdleAsync(accountID) + require.Eventually(t, func() bool { + ap, ok := pool.getAccountPool(accountID) + if !ok || ap == nil { + return false + } + ap.mu.Lock() + defer ap.mu.Unlock() + return !ap.prewarmActive + }, 2*time.Second, 20*time.Millisecond) + require.Equal(t, 2, dialer.DialCount()) + + // 连续失败达到阈值后,新的预热触发应被抑制,不再继续拨号。 + pool.ensureTargetIdleAsync(accountID) + time.Sleep(120 * time.Millisecond) + require.Equal(t, 2, dialer.DialCount()) +} + func TestOpenAIWSConnPool_AcquireQueueWaitMetrics(t *testing.T) { cfg := &config.Config{} cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 @@ -131,11 +243,13 @@ func TestOpenAIWSConnPool_AcquireQueueWaitMetrics(t *testing.T) { pool.mu.Lock() ap := pool.ensureAccountPoolLocked(accountID) + ap.mu.Lock() ap.conns[conn.id] = conn ap.lastAcquire = &openAIWSAcquireRequest{ Account: account, WSURL: "wss://example.com/v1/responses", } + ap.mu.Unlock() pool.mu.Unlock() go func() { @@ -156,6 +270,39 @@ func TestOpenAIWSConnPool_AcquireQueueWaitMetrics(t *testing.T) { metrics := pool.SnapshotMetrics() require.GreaterOrEqual(t, metrics.AcquireQueueWaitTotal, int64(1)) require.Greater(t, metrics.AcquireQueueWaitMsTotal, int64(0)) + require.GreaterOrEqual(t, metrics.ConnPickTotal, int64(1)) +} + +func TestOpenAIWSConnPool_ForceNewConnSkipsReuse(t *testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 2 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 2 + + pool := newOpenAIWSConnPool(cfg) + dialer := &openAIWSCountingDialer{} + pool.setClientDialerForTest(dialer) + + account := &Account{ID: 123, Platform: PlatformOpenAI, Type: AccountTypeAPIKey} + + lease1, err := pool.Acquire(context.Background(), openAIWSAcquireRequest{ + Account: account, + WSURL: "wss://example.com/v1/responses", + }) + require.NoError(t, err) + require.NotNil(t, lease1) + lease1.Release() + + lease2, err := pool.Acquire(context.Background(), 
openAIWSAcquireRequest{ + Account: account, + WSURL: "wss://example.com/v1/responses", + ForceNewConn: true, + }) + require.NoError(t, err) + require.NotNil(t, lease2) + lease2.Release() + + require.Equal(t, 2, dialer.DialCount(), "ForceNewConn=true 时应跳过空闲连接复用并新建连接") } func TestOpenAIWSConnPool_EffectiveMaxConnsByAccount(t *testing.T) { @@ -197,6 +344,25 @@ func TestOpenAIWSConnPool_EffectiveMaxConnsDisabledFallbackHardCap(t *testing.T) require.Equal(t, 8, pool.effectiveMaxConnsByAccount(account), "关闭动态模式后应保持旧行为") } +func TestOpenAIWSConnLease_ReadMessageWithContextTimeout_PerRead(t *testing.T) { + conn := newOpenAIWSConn("timeout", 1, &openAIWSBlockingConn{readDelay: 80 * time.Millisecond}, nil) + lease := &openAIWSConnLease{conn: conn} + + _, err := lease.ReadMessageWithContextTimeout(context.Background(), 20*time.Millisecond) + require.Error(t, err) + require.ErrorIs(t, err, context.DeadlineExceeded) + + payload, err := lease.ReadMessageWithContextTimeout(context.Background(), 150*time.Millisecond) + require.NoError(t, err) + require.Contains(t, string(payload), "response.completed") + + parentCtx, cancel := context.WithCancel(context.Background()) + cancel() + _, err = lease.ReadMessageWithContextTimeout(parentCtx, 150*time.Millisecond) + require.Error(t, err) + require.ErrorIs(t, err, context.Canceled) +} + type openAIWSFakeDialer struct{} func (d *openAIWSFakeDialer) Dial( @@ -212,6 +378,60 @@ func (d *openAIWSFakeDialer) Dial( return &openAIWSFakeConn{}, 0, nil, nil } +type openAIWSCountingDialer struct { + mu sync.Mutex + dialCount int +} + +type openAIWSAlwaysFailDialer struct { + mu sync.Mutex + dialCount int +} + +func (d *openAIWSCountingDialer) Dial( + ctx context.Context, + wsURL string, + headers http.Header, + proxyURL string, +) (openAIWSClientConn, int, http.Header, error) { + _ = ctx + _ = wsURL + _ = headers + _ = proxyURL + d.mu.Lock() + d.dialCount++ + d.mu.Unlock() + return &openAIWSFakeConn{}, 0, nil, nil +} + +func (d 
*openAIWSCountingDialer) DialCount() int { + d.mu.Lock() + defer d.mu.Unlock() + return d.dialCount +} + +func (d *openAIWSAlwaysFailDialer) Dial( + ctx context.Context, + wsURL string, + headers http.Header, + proxyURL string, +) (openAIWSClientConn, int, http.Header, error) { + _ = ctx + _ = wsURL + _ = headers + _ = proxyURL + d.mu.Lock() + d.dialCount++ + d.mu.Unlock() + return nil, 503, nil, errors.New("dial failed") +} + +func (d *openAIWSAlwaysFailDialer) DialCount() int { + d.mu.Lock() + defer d.mu.Unlock() + return d.dialCount +} + type openAIWSFakeConn struct { mu sync.Mutex closed bool @@ -252,6 +472,41 @@ func (c *openAIWSFakeConn) Close() error { return nil } +type openAIWSBlockingConn struct { + readDelay time.Duration +} + +func (c *openAIWSBlockingConn) WriteJSON(ctx context.Context, value any) error { + _ = ctx + _ = value + return nil +} + +func (c *openAIWSBlockingConn) ReadMessage(ctx context.Context) ([]byte, error) { + delay := c.readDelay + if delay <= 0 { + delay = 10 * time.Millisecond + } + timer := time.NewTimer(delay) + defer timer.Stop() + + select { + case <-ctx.Done(): + return nil, ctx.Err() + case <-timer.C: + return []byte(`{"type":"response.completed","response":{"id":"resp_blocking"}}`), nil + } +} + +func (c *openAIWSBlockingConn) Ping(ctx context.Context) error { + _ = ctx + return nil +} + +func (c *openAIWSBlockingConn) Close() error { + return nil +} + type openAIWSNilConnDialer struct{} func (d *openAIWSNilConnDialer) Dial( @@ -283,3 +538,23 @@ func TestOpenAIWSConnPool_DialConnNilConnection(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "nil connection") } + +func TestOpenAIWSConnPool_SnapshotTransportMetrics(t *testing.T) { + cfg := &config.Config{} + pool := newOpenAIWSConnPool(cfg) + + dialer, ok := pool.clientDialer.(*coderOpenAIWSClientDialer) + require.True(t, ok) + + _, err := dialer.proxyHTTPClient("http://127.0.0.1:28080") + require.NoError(t, err) + _, err = 
dialer.proxyHTTPClient("http://127.0.0.1:28080") + require.NoError(t, err) + _, err = dialer.proxyHTTPClient("http://127.0.0.1:28081") + require.NoError(t, err) + + snapshot := pool.SnapshotTransportMetrics() + require.Equal(t, int64(1), snapshot.ProxyClientCacheHits) + require.Equal(t, int64(2), snapshot.ProxyClientCacheMisses) + require.InDelta(t, 1.0/3.0, snapshot.TransportReuseRatio, 0.0001) +} diff --git a/backend/internal/service/openai_ws_protocol_forward_test.go b/backend/internal/service/openai_ws_protocol_forward_test.go index 92a31b1cf..e4ec57f39 100644 --- a/backend/internal/service/openai_ws_protocol_forward_test.go +++ b/backend/internal/service/openai_ws_protocol_forward_test.go @@ -6,10 +6,13 @@ import ( "net/http" "net/http/httptest" "strings" + "sync/atomic" "testing" + "time" "github.com/Wei-Shaw/sub2api/internal/config" "github.com/gin-gonic/gin" + "github.com/gorilla/websocket" "github.com/stretchr/testify/require" "github.com/tidwall/gjson" ) @@ -67,9 +70,9 @@ func TestOpenAIGatewayService_Forward_PreservePreviousResponseIDWhenWSEnabled(t body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_123","input":[{"type":"input_text","text":"hello"}]}`) result, err := svc.Forward(context.Background(), c, account, body) - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, "resp_123", gjson.GetBytes(upstream.lastBody, "previous_response_id").String()) + require.Error(t, err) + require.Nil(t, result) + require.Nil(t, upstream.lastReq, "WS 模式下失败时不应回退 HTTP") } func TestOpenAIGatewayService_Forward_RemovePreviousResponseIDWhenWSDisabled(t *testing.T) { @@ -183,9 +186,10 @@ func TestOpenAIGatewayService_Forward_WSv2Dial426FallbackHTTP(t *testing.T) { body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_426","input":[{"type":"input_text","text":"hello"}]}`) result, err := svc.Forward(context.Background(), c, account, body) - require.NoError(t, err) - require.NotNil(t, result) - 
require.Equal(t, "resp_426", gjson.GetBytes(upstream.lastBody, "previous_response_id").String(), "426 回退 HTTP 后仍应保留 previous_response_id") + require.Error(t, err) + require.Nil(t, result) + require.Contains(t, err.Error(), "upgrade_required") + require.Nil(t, upstream.lastReq, "WS 模式下不应再回退 HTTP") } func TestOpenAIGatewayService_Forward_WSv2FallbackCoolingSkipWS(t *testing.T) { @@ -243,13 +247,12 @@ func TestOpenAIGatewayService_Forward_WSv2FallbackCoolingSkipWS(t *testing.T) { svc.markOpenAIWSFallbackCooling(account.ID, "upgrade_required") body := []byte(`{"model":"gpt-5.1","stream":false,"previous_response_id":"resp_cooling","input":[{"type":"input_text","text":"hello"}]}`) result, err := svc.Forward(context.Background(), c, account, body) - require.NoError(t, err) - require.NotNil(t, result) - require.Equal(t, "resp_cooling", gjson.GetBytes(upstream.lastBody, "previous_response_id").String()) + require.Error(t, err) + require.Nil(t, result) + require.Nil(t, upstream.lastReq, "WS 模式下不应再回退 HTTP") - v, ok := c.Get("openai_ws_fallback_cooling") - require.True(t, ok) - require.Equal(t, true, v) + _, ok := c.Get("openai_ws_fallback_cooling") + require.False(t, ok, "已移除 fallback cooling 快捷回退路径") } func TestOpenAIGatewayService_Forward_ReturnErrorWhenOnlyWSv1Enabled(t *testing.T) { @@ -393,3 +396,427 @@ func TestOpenAIGatewayService_Forward_WSv2FallbackWhenResponseAlreadyWrittenRetu require.Contains(t, err.Error(), "ws fallback") require.Nil(t, upstream.lastReq, "已写下游响应时,不应再回退 HTTP") } + +func TestOpenAIGatewayService_Forward_WSv2StreamEarlyCloseFallbackHTTP(t *testing.T) { + gin.SetMode(gin.TestMode) + + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + t.Errorf("upgrade websocket failed: %v", err) + return + } + defer func() { + _ = conn.Close() + }() + + var req 
map[string]any + if err := conn.ReadJSON(&req); err != nil { + t.Errorf("read ws request failed: %v", err) + return + } + + // 仅发送 response.created(非 token 事件)后立即关闭, + // 模拟线上“上游早期内部错误断连”的场景。 + if err := conn.WriteJSON(map[string]any{ + "type": "response.created", + "response": map[string]any{ + "id": "resp_ws_created_only", + "model": "gpt-5.3-codex", + }, + }); err != nil { + t.Errorf("write response.created failed: %v", err) + return + } + closePayload := websocket.FormatCloseMessage(websocket.CloseInternalServerErr, "") + _ = conn.WriteControl(websocket.CloseMessage, closePayload, time.Now().Add(time.Second)) + })) + defer wsServer.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"text/event-stream"}}, + Body: io.NopCloser(strings.NewReader( + "data: {\"type\":\"response.output_text.delta\",\"delta\":\"ok\"}\n\n" + + "data: {\"type\":\"response.completed\",\"response\":{\"id\":\"resp_http_fallback\",\"usage\":{\"input_tokens\":2,\"output_tokens\":1}}}\n\n" + + "data: [DONE]\n\n", + )), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.FallbackCooldownSeconds = 1 + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + + account := &Account{ + ID: 88, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: 
map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.3-codex","stream":true,"input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.Error(t, err) + require.Nil(t, result) + require.Nil(t, upstream.lastReq, "WS 早期断连后不应再回退 HTTP") + require.Empty(t, rec.Body.String(), "未产出 token 前上游断连时不应写入下游半截流") +} + +func TestOpenAIGatewayService_Forward_WSv2RetryFiveTimesThenFallbackHTTP(t *testing.T) { + gin.SetMode(gin.TestMode) + + var wsAttempts atomic.Int32 + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wsAttempts.Add(1) + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + t.Errorf("upgrade websocket failed: %v", err) + return + } + defer func() { + _ = conn.Close() + }() + + var req map[string]any + if err := conn.ReadJSON(&req); err != nil { + t.Errorf("read ws request failed: %v", err) + return + } + closePayload := websocket.FormatCloseMessage(websocket.CloseInternalServerErr, "") + _ = conn.WriteControl(websocket.CloseMessage, closePayload, time.Now().Add(time.Second)) + })) + defer wsServer.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"text/event-stream"}}, + Body: io.NopCloser(strings.NewReader( + "data: {\"type\":\"response.output_text.delta\",\"delta\":\"ok\"}\n\n" + + "data: 
{\"type\":\"response.completed\",\"response\":{\"id\":\"resp_retry_http_fallback\",\"usage\":{\"input_tokens\":2,\"output_tokens\":1}}}\n\n" + + "data: [DONE]\n\n", + )), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.FallbackCooldownSeconds = 1 + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + + account := &Account{ + ID: 89, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.3-codex","stream":true,"input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.Error(t, err) + require.Nil(t, result) + require.Nil(t, upstream.lastReq, "WS 重连耗尽后不应再回退 HTTP") + require.Equal(t, int32(openAIWSReconnectRetryLimit+1), wsAttempts.Load()) +} + +func TestOpenAIGatewayService_Forward_WSv2PolicyViolationFastFallbackHTTP(t *testing.T) { + gin.SetMode(gin.TestMode) + + var wsAttempts atomic.Int32 + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wsAttempts.Add(1) + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + t.Errorf("upgrade websocket failed: %v", err) + return + } + defer func() { + _ = conn.Close() + }() + + var req map[string]any + if err := conn.ReadJSON(&req); err != nil { + t.Errorf("read ws request failed: 
%v", err) + return + } + closePayload := websocket.FormatCloseMessage(websocket.ClosePolicyViolation, "") + _ = conn.WriteControl(websocket.CloseMessage, closePayload, time.Now().Add(time.Second)) + })) + defer wsServer.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader(`{"id":"resp_policy_fallback","usage":{"input_tokens":1,"output_tokens":1}}`)), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.FallbackCooldownSeconds = 1 + cfg.Gateway.OpenAIWS.RetryBackoffInitialMS = 1 + cfg.Gateway.OpenAIWS.RetryBackoffMaxMS = 2 + cfg.Gateway.OpenAIWS.RetryJitterRatio = 0 + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + + account := &Account{ + ID: 8901, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.3-codex","stream":false,"input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.Error(t, err) + require.Nil(t, result) + require.Nil(t, upstream.lastReq, "策略违规关闭后不应回退 HTTP") + require.Equal(t, 
int32(1), wsAttempts.Load(), "策略违规不应进行 WS 重试") +} + +func TestOpenAIGatewayService_Forward_WSv2ConnectionLimitReachedRetryThenFallbackHTTP(t *testing.T) { + gin.SetMode(gin.TestMode) + + var wsAttempts atomic.Int32 + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wsAttempts.Add(1) + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + t.Errorf("upgrade websocket failed: %v", err) + return + } + defer func() { + _ = conn.Close() + }() + + var req map[string]any + if err := conn.ReadJSON(&req); err != nil { + t.Errorf("read ws request failed: %v", err) + return + } + _ = conn.WriteJSON(map[string]any{ + "type": "error", + "error": map[string]any{ + "code": "websocket_connection_limit_reached", + "type": "server_error", + "message": "websocket connection limit reached", + }, + }) + })) + defer wsServer.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: io.NopCloser(strings.NewReader(`{"id":"resp_http_retry_limit","usage":{"input_tokens":1,"output_tokens":1}}`)), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.FallbackCooldownSeconds = 1 + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + + 
account := &Account{ + ID: 90, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.3-codex","stream":false,"input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.Error(t, err) + require.Nil(t, result) + require.Nil(t, upstream.lastReq, "触发 websocket_connection_limit_reached 后不应回退 HTTP") + require.Equal(t, int32(openAIWSReconnectRetryLimit+1), wsAttempts.Load()) +} + +func TestOpenAIGatewayService_Forward_WSv2PreviousResponseNotFoundDropsPreviousResponseIDOnHTTPFallback(t *testing.T) { + gin.SetMode(gin.TestMode) + + var wsAttempts atomic.Int32 + upgrader := websocket.Upgrader{CheckOrigin: func(r *http.Request) bool { return true }} + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + wsAttempts.Add(1) + conn, err := upgrader.Upgrade(w, r, nil) + if err != nil { + t.Errorf("upgrade websocket failed: %v", err) + return + } + defer func() { + _ = conn.Close() + }() + + var req map[string]any + if err := conn.ReadJSON(&req); err != nil { + t.Errorf("read ws request failed: %v", err) + return + } + _ = conn.WriteJSON(map[string]any{ + "type": "error", + "error": map[string]any{ + "code": "previous_response_not_found", + "type": "invalid_request_error", + "message": "previous response not found", + }, + }) + })) + defer wsServer.Close() + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "custom-client/1.0") + + upstream := &httpUpstreamRecorder{ + resp: &http.Response{ + StatusCode: http.StatusOK, + Header: http.Header{"Content-Type": []string{"application/json"}}, + Body: 
io.NopCloser(strings.NewReader(`{"id":"resp_http_drop_prev","usage":{"input_tokens":1,"output_tokens":1}}`)), + }, + } + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.FallbackCooldownSeconds = 1 + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: upstream, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + } + + account := &Account{ + ID: 91, + Name: "openai-apikey", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "base_url": wsServer.URL, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"gpt-5.3-codex","stream":false,"previous_response_id":"resp_prev_missing","input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.Error(t, err) + require.Nil(t, result) + require.Nil(t, upstream.lastReq, "previous_response_not_found 不应回退 HTTP") + require.Equal(t, int32(1), wsAttempts.Load(), "previous_response_not_found 不应进行 WS 重试") +} diff --git a/backend/internal/service/openai_ws_state_store.go b/backend/internal/service/openai_ws_state_store.go index 8758e9ff2..349009458 100644 --- a/backend/internal/service/openai_ws_state_store.go +++ b/backend/internal/service/openai_ws_state_store.go @@ -14,6 +14,8 @@ import ( const ( openAIWSResponseAccountCachePrefix = "openai:response:" openAIWSStateStoreCleanupInterval = time.Minute + // 分桶增量清理,避免在写锁下做全量 map 扫描。 + openAIWSStateStoreCleanupScanLimit = 128 ) type openAIWSAccountBinding struct { @@ -31,6 +33,11 @@ type openAIWSTurnStateBinding struct { expiresAt time.Time } +type 
openAIWSSessionConnBinding struct { + connID string + expiresAt time.Time +} + // OpenAIWSStateStore 管理 WSv2 的粘连状态。 // - response_id -> account_id 用于续链路由 // - response_id -> conn_id 用于连接内上下文复用 @@ -49,6 +56,10 @@ type OpenAIWSStateStore interface { BindSessionTurnState(groupID int64, sessionHash, turnState string, ttl time.Duration) GetSessionTurnState(groupID int64, sessionHash string) (string, bool) DeleteSessionTurnState(groupID int64, sessionHash string) + + BindSessionConn(groupID int64, sessionHash, connID string, ttl time.Duration) + GetSessionConn(groupID int64, sessionHash string) (string, bool) + DeleteSessionConn(groupID int64, sessionHash string) } type defaultOpenAIWSStateStore struct { @@ -58,6 +69,7 @@ type defaultOpenAIWSStateStore struct { responseToAccount map[string]openAIWSAccountBinding responseToConn map[string]openAIWSConnBinding sessionToTurnState map[string]openAIWSTurnStateBinding + sessionToConn map[string]openAIWSSessionConnBinding lastCleanupUnixNano atomic.Int64 } @@ -69,6 +81,7 @@ func NewOpenAIWSStateStore(cache GatewayCache) OpenAIWSStateStore { responseToAccount: make(map[string]openAIWSAccountBinding, 256), responseToConn: make(map[string]openAIWSConnBinding, 256), sessionToTurnState: make(map[string]openAIWSTurnStateBinding, 256), + sessionToConn: make(map[string]openAIWSSessionConnBinding, 256), } store.lastCleanupUnixNano.Store(time.Now().UnixNano()) return store @@ -228,6 +241,50 @@ func (s *defaultOpenAIWSStateStore) DeleteSessionTurnState(groupID int64, sessio s.mu.Unlock() } +func (s *defaultOpenAIWSStateStore) BindSessionConn(groupID int64, sessionHash, connID string, ttl time.Duration) { + key := openAIWSSessionTurnStateKey(groupID, sessionHash) + conn := strings.TrimSpace(connID) + if key == "" || conn == "" { + return + } + ttl = normalizeOpenAIWSTTL(ttl) + s.maybeCleanup() + + s.mu.Lock() + s.sessionToConn[key] = openAIWSSessionConnBinding{ + connID: conn, + expiresAt: time.Now().Add(ttl), + } + s.mu.Unlock() +} + +func 
(s *defaultOpenAIWSStateStore) GetSessionConn(groupID int64, sessionHash string) (string, bool) { + key := openAIWSSessionTurnStateKey(groupID, sessionHash) + if key == "" { + return "", false + } + s.maybeCleanup() + + now := time.Now() + s.mu.RLock() + binding, ok := s.sessionToConn[key] + s.mu.RUnlock() + if !ok || now.After(binding.expiresAt) || strings.TrimSpace(binding.connID) == "" { + return "", false + } + return binding.connID, true +} + +func (s *defaultOpenAIWSStateStore) DeleteSessionConn(groupID int64, sessionHash string) { + key := openAIWSSessionTurnStateKey(groupID, sessionHash) + if key == "" { + return + } + s.mu.Lock() + delete(s.sessionToConn, key) + s.mu.Unlock() +} + func (s *defaultOpenAIWSStateStore) maybeCleanup() { if s == nil { return @@ -243,19 +300,84 @@ func (s *defaultOpenAIWSStateStore) maybeCleanup() { s.mu.Lock() defer s.mu.Unlock() - for key, binding := range s.responseToAccount { + cleanupExpiredAccountBindings(s.responseToAccount, now, openAIWSStateStoreCleanupScanLimit) + cleanupExpiredConnBindings(s.responseToConn, now, openAIWSStateStoreCleanupScanLimit) + cleanupExpiredTurnStateBindings(s.sessionToTurnState, now, openAIWSStateStoreCleanupScanLimit) + cleanupExpiredSessionConnBindings(s.sessionToConn, now, openAIWSStateStoreCleanupScanLimit) +} + +func cleanupExpiredAccountBindings(bindings map[string]openAIWSAccountBinding, now time.Time, limit int) { + if len(bindings) == 0 { + return + } + if limit <= 0 { + limit = len(bindings) + } + scanned := 0 + for key, binding := range bindings { + if now.After(binding.expiresAt) { + delete(bindings, key) + } + scanned++ + if scanned >= limit { + break + } + } +} + +func cleanupExpiredConnBindings(bindings map[string]openAIWSConnBinding, now time.Time, limit int) { + if len(bindings) == 0 { + return + } + if limit <= 0 { + limit = len(bindings) + } + scanned := 0 + for key, binding := range bindings { if now.After(binding.expiresAt) { - delete(s.responseToAccount, key) + 
delete(bindings, key) + } + scanned++ + if scanned >= limit { + break } } - for key, binding := range s.responseToConn { +} + +func cleanupExpiredTurnStateBindings(bindings map[string]openAIWSTurnStateBinding, now time.Time, limit int) { + if len(bindings) == 0 { + return + } + if limit <= 0 { + limit = len(bindings) + } + scanned := 0 + for key, binding := range bindings { if now.After(binding.expiresAt) { - delete(s.responseToConn, key) + delete(bindings, key) + } + scanned++ + if scanned >= limit { + break } } - for key, binding := range s.sessionToTurnState { +} + +func cleanupExpiredSessionConnBindings(bindings map[string]openAIWSSessionConnBinding, now time.Time, limit int) { + if len(bindings) == 0 { + return + } + if limit <= 0 { + limit = len(bindings) + } + scanned := 0 + for key, binding := range bindings { if now.After(binding.expiresAt) { - delete(s.sessionToTurnState, key) + delete(bindings, key) + } + scanned++ + if scanned >= limit { + break } } } diff --git a/backend/internal/service/openai_ws_state_store_test.go b/backend/internal/service/openai_ws_state_store_test.go index 5e24310b8..5d6d5040d 100644 --- a/backend/internal/service/openai_ws_state_store_test.go +++ b/backend/internal/service/openai_ws_state_store_test.go @@ -2,6 +2,7 @@ package service import ( "context" + "fmt" "testing" "time" @@ -56,6 +57,23 @@ func TestOpenAIWSStateStore_SessionTurnStateTTL(t *testing.T) { require.False(t, ok) } +func TestOpenAIWSStateStore_SessionConnTTL(t *testing.T) { + store := NewOpenAIWSStateStore(nil) + store.BindSessionConn(9, "session_hash_conn_1", "conn_1", 30*time.Millisecond) + + connID, ok := store.GetSessionConn(9, "session_hash_conn_1") + require.True(t, ok) + require.Equal(t, "conn_1", connID) + + // group 隔离 + _, ok = store.GetSessionConn(10, "session_hash_conn_1") + require.False(t, ok) + + time.Sleep(60 * time.Millisecond) + _, ok = store.GetSessionConn(9, "session_hash_conn_1") + require.False(t, ok) +} + func 
TestOpenAIWSStateStore_GetResponseAccount_NoStaleAfterCacheMiss(t *testing.T) { cache := &stubGatewayCache{sessionBindings: map[string]int64{}} store := NewOpenAIWSStateStore(cache) @@ -74,3 +92,38 @@ func TestOpenAIWSStateStore_GetResponseAccount_NoStaleAfterCacheMiss(t *testing. require.NoError(t, err) require.Zero(t, accountID, "上游缓存失效后不应继续命中本地陈旧映射") } + +func TestOpenAIWSStateStore_MaybeCleanupIncremental(t *testing.T) { + raw := NewOpenAIWSStateStore(nil) + store, ok := raw.(*defaultOpenAIWSStateStore) + require.True(t, ok) + + expiredAt := time.Now().Add(-time.Minute) + total := openAIWSStateStoreCleanupScanLimit * 2 + store.mu.Lock() + for i := 0; i < total; i++ { + store.responseToConn[fmt.Sprintf("resp_%d", i)] = openAIWSConnBinding{ + connID: "conn_incremental", + expiresAt: expiredAt, + } + } + store.mu.Unlock() + + store.lastCleanupUnixNano.Store(time.Now().Add(-2 * openAIWSStateStoreCleanupInterval).UnixNano()) + store.maybeCleanup() + + store.mu.RLock() + remaining := len(store.responseToConn) + store.mu.RUnlock() + require.Greater(t, remaining, 0, "单次 cleanup 应为增量扫描") + require.Less(t, remaining, total, "单次 cleanup 应至少清理一部分过期键") + + for i := 0; i < 4 && remaining > 0; i++ { + store.lastCleanupUnixNano.Store(time.Now().Add(-2 * openAIWSStateStoreCleanupInterval).UnixNano()) + store.maybeCleanup() + store.mu.RLock() + remaining = len(store.responseToConn) + store.mu.RUnlock() + } + require.Zero(t, remaining, "多轮增量 cleanup 后应清空过期键") +} diff --git a/backend/internal/service/ops_upstream_context.go b/backend/internal/service/ops_upstream_context.go index c05945786..21e09c43e 100644 --- a/backend/internal/service/ops_upstream_context.go +++ b/backend/internal/service/ops_upstream_context.go @@ -29,6 +29,7 @@ const ( OpsTimeToFirstTokenMsKey = "ops_time_to_first_token_ms" // OpenAI WS 关键观测字段 OpsOpenAIWSQueueWaitMsKey = "ops_openai_ws_queue_wait_ms" + OpsOpenAIWSConnPickMsKey = "ops_openai_ws_conn_pick_ms" OpsOpenAIWSConnReusedKey = 
"ops_openai_ws_conn_reused" OpsOpenAIWSConnIDKey = "ops_openai_ws_conn_id" diff --git a/deploy/.env.example b/deploy/.env.example index 290f918ad..9f2ff13ee 100644 --- a/deploy/.env.example +++ b/deploy/.env.example @@ -66,11 +66,15 @@ LOG_SAMPLING_INITIAL=100 # 之后每 N 条保留 1 条 LOG_SAMPLING_THEREAFTER=100 -# Global max request body size in bytes (default: 100MB) -# 全局最大请求体大小(字节,默认 100MB) +# Global max request body size in bytes (default: 256MB) +# 全局最大请求体大小(字节,默认 256MB) # Applies to all requests, especially important for h2c first request memory protection # 适用于所有请求,对 h2c 第一请求的内存保护尤为重要 -SERVER_MAX_REQUEST_BODY_SIZE=104857600 +SERVER_MAX_REQUEST_BODY_SIZE=268435456 + +# Gateway max request body size in bytes (default: 256MB) +# 网关请求体最大字节数(默认 256MB) +GATEWAY_MAX_BODY_SIZE=268435456 # Enable HTTP/2 Cleartext (h2c) for client connections # 启用 HTTP/2 Cleartext (h2c) 客户端连接 diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index 6d6e9cda7..cc9ae7d69 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -27,11 +27,11 @@ server: # Trusted proxies for X-Forwarded-For parsing (CIDR/IP). Empty disables trusted proxies. 
# 信任的代理地址(CIDR/IP 格式),用于解析 X-Forwarded-For 头。留空则禁用代理信任。 trusted_proxies: [] - # Global max request body size in bytes (default: 100MB) - # 全局最大请求体大小(字节,默认 100MB) + # Global max request body size in bytes (default: 256MB) + # 全局最大请求体大小(字节,默认 256MB) # Applies to all requests, especially important for h2c first request memory protection # 适用于所有请求,对 h2c 第一请求的内存保护尤为重要 - max_request_body_size: 104857600 + max_request_body_size: 268435456 # HTTP/2 Cleartext (h2c) configuration # HTTP/2 Cleartext (h2c) 配置 h2c: @@ -143,9 +143,9 @@ gateway: # Timeout for waiting upstream response headers (seconds) # 等待上游响应头超时时间(秒) response_header_timeout: 600 - # Max request body size in bytes (default: 100MB) - # 请求体最大字节数(默认 100MB) - max_body_size: 104857600 + # Max request body size in bytes (default: 256MB) + # 请求体最大字节数(默认 256MB) + max_body_size: 268435456 # Max bytes to read for non-stream upstream responses (default: 8MB) # 非流式上游响应体读取上限(默认 8MB) upstream_response_read_max_bytes: 8388608 @@ -199,10 +199,10 @@ gateway: # OpenAI 透传模式是否放行客户端超时头(如 x-stainless-timeout) # 默认 false:过滤超时头,降低上游提前断流风险。 openai_passthrough_allow_timeout_headers: false - # OpenAI Responses WebSocket 配置(默认关闭,不影响现网 HTTP 线路) + # OpenAI Responses WebSocket 配置(默认开启,可按需回滚到 HTTP) openai_ws: - # 全局总开关,默认 false;关闭时所有请求保持原有 HTTP/SSE 路由 - enabled: false + # 全局总开关,默认 true;关闭时所有请求保持原有 HTTP/SSE 路由 + enabled: true # 按账号类型细分开关 oauth_enabled: true apikey_enabled: true @@ -210,6 +210,12 @@ gateway: force_http: false # 允许在 WSv2 下按策略恢复 store=true(默认 false) allow_store_recovery: false + # store=false 且无可复用会话连接时的策略: + # strict=强制新建连接(隔离优先),adaptive=仅在高风险失败后强制新建,off=尽量复用(性能优先) + store_disabled_conn_mode: strict + # store=false 且无可复用会话连接时,是否强制新建连接(默认 true,优先会话隔离) + # 兼容旧配置:仅在 store_disabled_conn_mode 未配置时生效 + store_disabled_force_new_conn: true # 是否启用 WSv2 generate=false 预热(默认 false) prewarm_generate_enabled: false # 协议 feature 开关,v2 优先于 v1 @@ -230,8 +236,22 @@ gateway: write_timeout_seconds: 120 pool_target_utilization: 0.7 
queue_limit_per_conn: 256 + # 流式写出批量 flush 参数 + event_flush_batch_size: 4 + event_flush_interval_ms: 25 + # 预热触发冷却(毫秒) + prewarm_cooldown_ms: 300 # WS 回退到 HTTP 后的冷却时间(秒),用于避免 WS/HTTP 来回抖动;0 表示关闭冷却 fallback_cooldown_seconds: 30 + # WS 重试退避参数(毫秒) + retry_backoff_initial_ms: 120 + retry_backoff_max_ms: 2000 + # 抖动比例(0-1) + retry_jitter_ratio: 0.2 + # 单次请求 WS 重试总预算(毫秒);0 表示关闭预算限制,不因预算回退 HTTP + retry_total_budget_ms: 0 + # payload_schema 日志采样率(0-1);降低热路径日志放大 + payload_log_sample_rate: 0.2 # 调度与粘连参数 lb_top_k: 3 sticky_session_ttl_seconds: 3600 diff --git a/tools/perf/openai_ws_v2_perf_suite_k6.js b/tools/perf/openai_ws_v2_perf_suite_k6.js new file mode 100644 index 000000000..df700270f --- /dev/null +++ b/tools/perf/openai_ws_v2_perf_suite_k6.js @@ -0,0 +1,216 @@ +import http from 'k6/http'; +import { check, sleep } from 'k6'; +import { Rate, Trend } from 'k6/metrics'; + +const baseURL = (__ENV.BASE_URL || 'http://127.0.0.1:5231').replace(/\/$/, ''); +const wsAPIKey = (__ENV.WS_API_KEY || '').trim(); +const wsHotspotAPIKey = (__ENV.WS_HOTSPOT_API_KEY || wsAPIKey).trim(); +const model = __ENV.MODEL || 'gpt-5.3-codex'; +const duration = __ENV.DURATION || '5m'; +const timeout = __ENV.TIMEOUT || '180s'; + +const shortRPS = Number(__ENV.SHORT_RPS || 12); +const longRPS = Number(__ENV.LONG_RPS || 4); +const errorRPS = Number(__ENV.ERROR_RPS || 2); +const hotspotRPS = Number(__ENV.HOTSPOT_RPS || 10); +const preAllocatedVUs = Number(__ENV.PRE_ALLOCATED_VUS || 50); +const maxVUs = Number(__ENV.MAX_VUS || 400); + +const reqDurationMs = new Trend('openai_ws_v2_perf_req_duration_ms', true); +const ttftMs = new Trend('openai_ws_v2_perf_ttft_ms', true); +const non2xxRate = new Rate('openai_ws_v2_perf_non2xx_rate'); +const doneRate = new Rate('openai_ws_v2_perf_done_rate'); +const expectedErrorRate = new Rate('openai_ws_v2_perf_expected_error_rate'); + +export const options = { + scenarios: { + short_request: { + executor: 'constant-arrival-rate', + exec: 'runShortRequest', + rate: 
shortRPS, + timeUnit: '1s', + duration, + preAllocatedVUs, + maxVUs, + tags: { scenario: 'short_request' }, + }, + long_request: { + executor: 'constant-arrival-rate', + exec: 'runLongRequest', + rate: longRPS, + timeUnit: '1s', + duration, + preAllocatedVUs: Math.max(20, Math.ceil(longRPS * 6)), + maxVUs: Math.max(100, Math.ceil(longRPS * 20)), + tags: { scenario: 'long_request' }, + }, + error_injection: { + executor: 'constant-arrival-rate', + exec: 'runErrorInjection', + rate: errorRPS, + timeUnit: '1s', + duration, + preAllocatedVUs: Math.max(8, Math.ceil(errorRPS * 4)), + maxVUs: Math.max(40, Math.ceil(errorRPS * 12)), + tags: { scenario: 'error_injection' }, + }, + hotspot_account: { + executor: 'constant-arrival-rate', + exec: 'runHotspotAccount', + rate: hotspotRPS, + timeUnit: '1s', + duration, + preAllocatedVUs: Math.max(16, Math.ceil(hotspotRPS * 3)), + maxVUs: Math.max(80, Math.ceil(hotspotRPS * 10)), + tags: { scenario: 'hotspot_account' }, + }, + }, + thresholds: { + openai_ws_v2_perf_non2xx_rate: ['rate<0.05'], + openai_ws_v2_perf_req_duration_ms: ['p(95)<5000', 'p(99)<9000'], + openai_ws_v2_perf_ttft_ms: ['p(99)<2000'], + openai_ws_v2_perf_done_rate: ['rate>0.95'], + }, +}; + +function buildHeaders(apiKey, opts = {}) { + const headers = { + 'Content-Type': 'application/json', + 'User-Agent': 'codex_cli_rs/0.104.0', + 'OpenAI-Beta': 'responses_websockets=2026-02-06,responses=experimental', + }; + if (apiKey) { + headers.Authorization = `Bearer ${apiKey}`; + } + if (opts.sessionID) { + headers.session_id = opts.sessionID; + } + if (opts.conversationID) { + headers.conversation_id = opts.conversationID; + } + return headers; +} + +function shortBody() { + return JSON.stringify({ + model, + stream: false, + input: [ + { + role: 'user', + content: [{ type: 'input_text', text: '请回复一个词:pong' }], + }, + ], + max_output_tokens: 64, + }); +} + +function longBody() { + const tools = []; + for (let i = 0; i < 28; i += 1) { + tools.push({ + type: 'function', + 
name: `perf_tool_${i}`, + description: 'load test tool schema', + parameters: { + type: 'object', + properties: { + query: { type: 'string' }, + limit: { type: 'number' }, + with_cache: { type: 'boolean' }, + }, + required: ['query'], + }, + }); + } + + const input = []; + for (let i = 0; i < 20; i += 1) { + input.push({ + role: 'user', + content: [{ type: 'input_text', text: `长请求压测消息 ${i}: 请输出简要摘要。` }], + }); + } + + return JSON.stringify({ + model, + stream: false, + input, + tools, + parallel_tool_calls: true, + max_output_tokens: 256, + reasoning: { effort: 'medium' }, + instructions: '你是压测助手,简洁回复。', + }); +} + +function errorInjectionBody() { + return JSON.stringify({ + model, + stream: false, + previous_response_id: `resp_not_found_${__VU}_${__ITER}`, + input: [ + { + role: 'user', + content: [{ type: 'input_text', text: '触发错误注入路径。' }], + }, + ], + }); +} + +function postResponses(apiKey, body, tags, opts = {}) { + const res = http.post(`${baseURL}/v1/responses`, body, { + headers: buildHeaders(apiKey, opts), + timeout, + tags, + }); + reqDurationMs.add(res.timings.duration, tags); + ttftMs.add(res.timings.waiting, tags); + non2xxRate.add(res.status < 200 || res.status >= 300, tags); + return res; +} + +function hasDone(res) { + return !!res && !!res.body && res.body.indexOf('[DONE]') >= 0; +} + +export function runShortRequest() { + const tags = { scenario: 'short_request' }; + const res = postResponses(wsAPIKey, shortBody(), tags); + check(res, { 'short status is 2xx': (r) => r.status >= 200 && r.status < 300 }); + doneRate.add(hasDone(res) || (res.status >= 200 && res.status < 300), tags); +} + +export function runLongRequest() { + const tags = { scenario: 'long_request' }; + const res = postResponses(wsAPIKey, longBody(), tags); + check(res, { 'long status is 2xx': (r) => r.status >= 200 && r.status < 300 }); + doneRate.add(hasDone(res) || (res.status >= 200 && res.status < 300), tags); +} + +export function runErrorInjection() { + const tags = { 
scenario: 'error_injection' }; + const res = postResponses(wsAPIKey, errorInjectionBody(), tags); + // 错误注入场景允许 4xx/5xx,重点观测 fallback 和错误路径抖动。 + expectedErrorRate.add(res.status >= 400, tags); + doneRate.add(hasDone(res), tags); +} + +export function runHotspotAccount() { + const tags = { scenario: 'hotspot_account' }; + const opts = { + sessionID: 'perf-hotspot-session-fixed', + conversationID: 'perf-hotspot-conversation-fixed', + }; + const res = postResponses(wsHotspotAPIKey, shortBody(), tags, opts); + check(res, { 'hotspot status is 2xx': (r) => r.status >= 200 && r.status < 300 }); + doneRate.add(hasDone(res) || (res.status >= 200 && res.status < 300), tags); + sleep(0.01); +} + +export function handleSummary(data) { + return { + stdout: `\nOpenAI WSv2 性能套件压测完成\n${JSON.stringify(data.metrics, null, 2)}\n`, + 'docs/perf/openai-ws-v2-perf-suite-summary.json': JSON.stringify(data, null, 2), + }; +} From f42945038f5439a0c5e4df8af13a4919c85c1f78 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Wed, 25 Feb 2026 21:33:18 +0800 Subject: [PATCH 008/120] =?UTF-8?q?chore(ws):=20=E5=B0=86=20OpenAI=20WS=20?= =?UTF-8?q?=E9=AB=98=E9=A2=91=E6=97=A5=E5=BF=97=E9=99=8D=E7=BA=A7=E4=B8=BA?= =?UTF-8?q?=20debug?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../service/openai_gateway_service.go | 6 ++--- .../internal/service/openai_ws_forwarder.go | 22 +++++++++++-------- 2 files changed, 16 insertions(+), 12 deletions(-) diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 820e0e0d3..ea53e97a0 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -1251,7 +1251,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco c.Set("openai_ws_transport_reason", wsDecision.Reason) } if wsDecision.Transport == OpenAIUpstreamTransportResponsesWebsocketV2 { - logOpenAIWSModeInfo( + 
logOpenAIWSModeDebug( "selected account_id=%d account_type=%s transport=%s reason=%s model=%s stream=%v", account.ID, account.Type, @@ -1428,7 +1428,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco // 命中 WS 时仅走 WebSocket Mode;不再自动回退 HTTP。 if wsDecision.Transport == OpenAIUpstreamTransportResponsesWebsocketV2 { _, hasPreviousResponseID := reqBody["previous_response_id"] - logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "forward_start account_id=%d account_type=%s model=%s stream=%v has_previous_response_id=%v", account.ID, account.Type, @@ -1540,7 +1540,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco if wsResult != nil { requestID = strings.TrimSpace(wsResult.RequestID) } - logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "forward_succeeded account_id=%d request_id=%s stream=%v has_first_token_ms=%v first_token_ms=%d ws_attempts=%d", account.ID, requestID, diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index 00d83c839..46dcc1269 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -438,6 +438,10 @@ func logOpenAIWSModeInfo(format string, args ...any) { logger.LegacyPrintf("service.openai_gateway", "[OpenAI WS Mode] "+format, args...) } +func logOpenAIWSModeDebug(format string, args ...any) { + logger.LegacyPrintf("service.openai_gateway", "[debug] [OpenAI WS Mode] "+format, args...) 
+} + func summarizeOpenAIWSReadCloseError(err error) (status string, reason string) { if err == nil { return "-", "-" @@ -874,7 +878,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( wsPath = normalizeOpenAIWSLogValue(p) } } - logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "dial_target account_id=%d account_type=%s ws_host=%s ws_path=%s", account.ID, account.Type, @@ -946,7 +950,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( forceNewConnByPolicy := shouldForceNewConnOnStoreDisabled(storeDisabledConnMode, lastFailureReason) forceNewConn := forceNewConnByPolicy && storeDisabled && previousResponseID == "" && sessionHash != "" && preferredConnID == "" wsHeaders, sessionResolution := s.buildOpenAIWSHeaders(c, account, token, decision, isCodexCLI, turnState, turnMetadata, promptCacheKey) - logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "acquire_start account_id=%d account_type=%s transport=%s preferred_conn_id=%s has_previous_response_id=%v session_hash=%s has_turn_state=%v turn_state_len=%d has_turn_metadata=%v turn_metadata_len=%d store_disabled=%v store_disabled_conn_mode=%s retry_last_reason=%s force_new_conn=%v header_user_agent=%s header_openai_beta=%s header_originator=%s header_accept_language=%s header_session_id=%s header_conversation_id=%s session_id_source=%s conversation_id_source=%s has_prompt_cache_key=%v has_chatgpt_account_id=%v has_authorization=%v has_session_id=%v has_conversation_id=%v proxy_enabled=%v", account.ID, account.Type, @@ -1014,7 +1018,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( } defer lease.Release() connID := strings.TrimSpace(lease.ConnID()) - logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "connected account_id=%d account_type=%s transport=%s conn_id=%s conn_reused=%v conn_pick_ms=%d queue_wait_ms=%d has_previous_response_id=%v", account.ID, account.Type, @@ -1035,7 +1039,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( } handshakeTurnState := strings.TrimSpace(lease.HandshakeHeader(openAIWSTurnStateHeader)) - 
logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "handshake account_id=%d conn_id=%s has_turn_state=%v turn_state_len=%d", account.ID, connID, @@ -1076,7 +1080,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( ) return nil, wrapOpenAIWSFallback("write_request", err) } - logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "write_request_sent account_id=%d conn_id=%s stream=%v payload_bytes=%d previous_response_id=%s", account.ID, connID, @@ -1159,7 +1163,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( bufferedStreamEvents = bufferedStreamEvents[:0] flushStreamWriter(true) flushedBufferedEventCount += flushed - logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "buffer_flush account_id=%d conn_id=%s reason=%s flushed=%d total_flushed=%d client_disconnected=%v", account.ID, connID, @@ -1230,7 +1234,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( firstTokenMs = &ms } if shouldLogOpenAIWSEvent(eventCount, eventType) { - logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "event_received account_id=%d conn_id=%s idx=%d type=%s bytes=%d token=%v terminal=%v buffered_pending=%d", account.ID, connID, @@ -1299,7 +1303,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( bufferedStreamEvents = append(bufferedStreamEvents, buffered) bufferedEventCount++ if shouldLogOpenAIWSBufferedEvent(bufferedEventCount) { - logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "buffer_enqueue account_id=%d conn_id=%s idx=%d event_idx=%d event_type=%s buffer_size=%d", account.ID, connID, @@ -1374,7 +1378,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( if firstTokenMs != nil { firstTokenMsValue = *firstTokenMs } - logOpenAIWSModeInfo( + logOpenAIWSModeDebug( "completed account_id=%d conn_id=%s response_id=%s stream=%v duration_ms=%d events=%d token_events=%d terminal_events=%d buffered_events=%d buffered_flushed=%d first_event=%s last_event=%s first_token_ms=%d wrote_downstream=%v client_disconnected=%v", account.ID, connID, From c9112bcc8485d11004ee353f0fbd305badd5b2de Mon Sep 17 00:00:00 2001 
From: yangjianbo Date: Wed, 25 Feb 2026 22:11:44 +0800 Subject: [PATCH 009/120] =?UTF-8?q?feat(openai-ws):=20=E5=AE=8C=E5=96=84?= =?UTF-8?q?=20WS=20=E5=85=A5=E7=AB=99=E4=BB=A3=E7=90=86=E5=B9=B6=E5=A2=9E?= =?UTF-8?q?=E5=BC=BA=E5=8F=AF=E8=A7=82=E6=B5=8B=E6=80=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 支持 Responses WS 入站路由与升级处理 - 将 WS 并发槽位改为按 turn 获取与释放,避免长连接占槽 - 为 WS 入站补齐每轮 usage 记录与首 token 指标 - 收敛协议行为:v2 入站仅允许 response.create - 增加可携带 close code 的错误类型并细化客户端关闭语义 - 统一 WS 模式日志标记 openai_ws_mode=true Co-Authored-By: Claude Opus 4.6 --- backend/cmd/server/VERSION | 2 +- .../handler/openai_gateway_handler.go | 260 +++++++++++++ backend/internal/server/routes/gateway.go | 2 + .../service/openai_gateway_service.go | 121 ++++++ .../service/openai_ws_fallback_test.go | 32 ++ .../internal/service/openai_ws_forwarder.go | 359 +++++++++++++++++- .../openai_ws_protocol_forward_test.go | 4 + .../internal/service/openai_ws_state_store.go | 51 +-- .../service/openai_ws_state_store_test.go | 16 +- 9 files changed, 789 insertions(+), 58 deletions(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index e1f56a4f6..0fbc54109 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.85.1 +0.1.85.2 diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index cdbf5662e..cff764288 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -17,6 +17,7 @@ import ( middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/Wei-Shaw/sub2api/internal/service" + coderws "github.com/coder/websocket" "github.com/gin-gonic/gin" "github.com/tidwall/gjson" "go.uber.org/zap" @@ -451,6 +452,243 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { } } +// ResponsesWebSocket handles OpenAI Responses API WebSocket ingress endpoint +// GET 
/openai/v1/responses (Upgrade: websocket) +func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { + if !isOpenAIWSUpgradeRequest(c.Request) { + h.errorResponse(c, http.StatusUpgradeRequired, "invalid_request_error", "WebSocket upgrade required (Upgrade: websocket)") + return + } + + apiKey, ok := middleware2.GetAPIKeyFromContext(c) + if !ok { + h.errorResponse(c, http.StatusUnauthorized, "authentication_error", "Invalid API key") + return + } + subject, ok := middleware2.GetAuthSubjectFromContext(c) + if !ok { + h.errorResponse(c, http.StatusInternalServerError, "api_error", "User context not found") + return + } + + reqLog := requestLogger( + c, + "handler.openai_gateway.responses_ws", + zap.Int64("user_id", subject.UserID), + zap.Int64("api_key_id", apiKey.ID), + zap.Any("group_id", apiKey.GroupID), + zap.Bool("openai_ws_mode", true), + ) + if !h.ensureResponsesDependencies(c, reqLog) { + return + } + reqLog.Info("openai.websocket_ingress_started", zap.Bool("openai_ws_mode", true)) + + wsConn, err := coderws.Accept(c.Writer, c.Request, &coderws.AcceptOptions{ + CompressionMode: coderws.CompressionContextTakeover, + }) + if err != nil { + reqLog.Warn("openai.websocket_accept_failed", zap.Error(err)) + return + } + defer wsConn.CloseNow() + wsConn.SetReadLimit(128 * 1024 * 1024) + + ctx := c.Request.Context() + readCtx, cancel := context.WithTimeout(ctx, 30*time.Second) + msgType, firstMessage, err := wsConn.Read(readCtx) + cancel() + if err != nil { + reqLog.Warn("openai.websocket_read_first_message_failed", zap.Error(err)) + closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "missing first response.create message") + return + } + if msgType != coderws.MessageText && msgType != coderws.MessageBinary { + closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "unsupported websocket message type") + return + } + if !gjson.ValidBytes(firstMessage) { + closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "invalid JSON payload") + return + 
} + + reqModel := strings.TrimSpace(gjson.GetBytes(firstMessage, "model").String()) + if reqModel == "" { + closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "model is required in first response.create payload") + return + } + previousResponseID := strings.TrimSpace(gjson.GetBytes(firstMessage, "previous_response_id").String()) + reqLog = reqLog.With( + zap.Bool("ws_ingress", true), + zap.String("model", reqModel), + zap.Bool("has_previous_response_id", previousResponseID != ""), + ) + setOpsRequestContext(c, reqModel, true, firstMessage) + + userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(ctx, subject.UserID, subject.Concurrency) + if err != nil { + reqLog.Warn("openai.websocket_user_slot_acquire_failed", zap.Error(err)) + closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "failed to acquire user concurrency slot") + return + } + if !userAcquired { + closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "too many concurrent requests, please retry later") + return + } + subscription, _ := middleware2.GetSubscriptionFromContext(c) + if err := h.billingCacheService.CheckBillingEligibility(ctx, apiKey.User, apiKey, apiKey.Group, subscription); err != nil { + reqLog.Info("openai.websocket_billing_eligibility_check_failed", zap.Error(err)) + closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "billing check failed") + return + } + + sessionHash := h.gatewayService.GenerateSessionHash(c, firstMessage) + selection, scheduleDecision, err := h.gatewayService.SelectAccountWithScheduler( + ctx, + apiKey.GroupID, + previousResponseID, + sessionHash, + reqModel, + nil, + ) + if err != nil { + reqLog.Warn("openai.websocket_account_select_failed", zap.Error(err)) + closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "no available account") + return + } + if selection == nil || selection.Account == nil { + closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "no available account") + return + } + + account := 
selection.Account + accountReleaseFunc := selection.ReleaseFunc + if !selection.Acquired { + if selection.WaitPlan == nil { + closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "account is busy, please retry later") + return + } + fastReleaseFunc, fastAcquired, err := h.concurrencyHelper.TryAcquireAccountSlot( + ctx, + account.ID, + selection.WaitPlan.MaxConcurrency, + ) + if err != nil { + reqLog.Warn("openai.websocket_account_slot_acquire_failed", zap.Int64("account_id", account.ID), zap.Error(err)) + closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "failed to acquire account concurrency slot") + return + } + if !fastAcquired { + closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "account is busy, please retry later") + return + } + accountReleaseFunc = fastReleaseFunc + } + if err := h.gatewayService.BindStickySession(ctx, apiKey.GroupID, sessionHash, account.ID); err != nil { + reqLog.Warn("openai.websocket_bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err)) + } + + token, _, err := h.gatewayService.GetAccessToken(ctx, account) + if err != nil { + reqLog.Warn("openai.websocket_get_access_token_failed", zap.Int64("account_id", account.ID), zap.Error(err)) + closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "failed to get access token") + return + } + + reqLog.Debug("openai.websocket_account_selected", + zap.Int64("account_id", account.ID), + zap.String("account_name", account.Name), + zap.String("schedule_layer", scheduleDecision.Layer), + zap.Int("candidate_count", scheduleDecision.CandidateCount), + ) + + currentUserRelease := wrapReleaseOnDone(ctx, userReleaseFunc) + currentAccountRelease := wrapReleaseOnDone(ctx, accountReleaseFunc) + releaseTurnSlots := func() { + if currentAccountRelease != nil { + currentAccountRelease() + currentAccountRelease = nil + } + if currentUserRelease != nil { + currentUserRelease() + currentUserRelease = nil + } + } + defer releaseTurnSlots() + + userAgent := 
c.GetHeader("User-Agent") + clientIP := ip.GetClientIP(c) + hooks := &service.OpenAIWSIngressHooks{ + BeforeTurn: func(turn int) error { + if turn == 1 { + return nil + } + // 非首轮 turn 需要重新抢占并发槽位,避免长连接空闲占槽。 + userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(ctx, subject.UserID, subject.Concurrency) + if err != nil { + return service.NewOpenAIWSClientCloseError(coderws.StatusInternalError, "failed to acquire user concurrency slot", err) + } + if !userAcquired { + return service.NewOpenAIWSClientCloseError(coderws.StatusTryAgainLater, "too many concurrent requests, please retry later", nil) + } + accountReleaseFunc, accountAcquired, err := h.concurrencyHelper.TryAcquireAccountSlot(ctx, account.ID, account.Concurrency) + if err != nil { + if userReleaseFunc != nil { + userReleaseFunc() + } + return service.NewOpenAIWSClientCloseError(coderws.StatusInternalError, "failed to acquire account concurrency slot", err) + } + if !accountAcquired { + if userReleaseFunc != nil { + userReleaseFunc() + } + return service.NewOpenAIWSClientCloseError(coderws.StatusTryAgainLater, "account is busy, please retry later", nil) + } + currentUserRelease = wrapReleaseOnDone(ctx, userReleaseFunc) + currentAccountRelease = wrapReleaseOnDone(ctx, accountReleaseFunc) + return nil + }, + AfterTurn: func(turn int, result *service.OpenAIForwardResult, turnErr error) { + releaseTurnSlots() + if turnErr != nil || result == nil { + return + } + h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, true, result.FirstTokenMs) + h.submitUsageRecordTask(func(taskCtx context.Context) { + if err := h.gatewayService.RecordUsage(taskCtx, &service.OpenAIRecordUsageInput{ + Result: result, + APIKey: apiKey, + User: apiKey.User, + Account: account, + Subscription: subscription, + UserAgent: userAgent, + IPAddress: clientIP, + APIKeyService: h.apiKeyService, + }); err != nil { + reqLog.Error("openai.websocket_record_usage_failed", + zap.Int64("account_id", account.ID), + 
zap.String("request_id", result.RequestID), + zap.Error(err), + ) + } + }) + }, + } + + if err := h.gatewayService.ProxyResponsesWebSocketFromClient(ctx, c, wsConn, account, token, firstMessage, hooks); err != nil { + h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, false, nil) + reqLog.Warn("openai.websocket_proxy_failed", zap.Int64("account_id", account.ID), zap.Error(err)) + var closeErr *service.OpenAIWSClientCloseError + if errors.As(err, &closeErr) { + closeOpenAIClientWS(wsConn, closeErr.StatusCode(), closeErr.Reason()) + return + } + closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "upstream websocket proxy failed") + return + } + reqLog.Info("openai.websocket_ingress_closed", zap.Bool("openai_ws_mode", true), zap.Int64("account_id", account.ID)) +} + func (h *OpenAIGatewayHandler) recoverResponsesPanic(c *gin.Context, streamStarted *bool) { recovered := recover() if recovered == nil { @@ -667,3 +905,25 @@ func (h *OpenAIGatewayHandler) errorResponse(c *gin.Context, status int, errType }, }) } + +func isOpenAIWSUpgradeRequest(r *http.Request) bool { + if r == nil { + return false + } + if !strings.EqualFold(strings.TrimSpace(r.Header.Get("Upgrade")), "websocket") { + return false + } + return strings.Contains(strings.ToLower(strings.TrimSpace(r.Header.Get("Connection"))), "upgrade") +} + +func closeOpenAIClientWS(conn *coderws.Conn, status coderws.StatusCode, reason string) { + if conn == nil { + return + } + reason = strings.TrimSpace(reason) + if len(reason) > 120 { + reason = reason[:120] + } + _ = conn.Close(status, reason) + _ = conn.CloseNow() +} diff --git a/backend/internal/server/routes/gateway.go b/backend/internal/server/routes/gateway.go index 930c8b9ee..6bd91b853 100644 --- a/backend/internal/server/routes/gateway.go +++ b/backend/internal/server/routes/gateway.go @@ -43,6 +43,7 @@ func RegisterGatewayRoutes( gateway.GET("/usage", h.Gateway.Usage) // OpenAI Responses API gateway.POST("/responses", 
h.OpenAIGateway.Responses) + gateway.GET("/responses", h.OpenAIGateway.ResponsesWebSocket) // 明确阻止旧协议入口:OpenAI 仅支持 Responses API,避免客户端误解为会自动路由到其它平台。 gateway.POST("/chat/completions", func(c *gin.Context) { c.JSON(http.StatusBadRequest, gin.H{ @@ -69,6 +70,7 @@ func RegisterGatewayRoutes( // OpenAI Responses API(不带v1前缀的别名) r.POST("/responses", bodyLimit, clientRequestID, opsErrorLogger, gin.HandlerFunc(apiKeyAuth), h.OpenAIGateway.Responses) + r.GET("/responses", bodyLimit, clientRequestID, opsErrorLogger, gin.HandlerFunc(apiKeyAuth), h.OpenAIGateway.ResponsesWebSocket) // Antigravity 模型列表 r.GET("/antigravity/models", gin.HandlerFunc(apiKeyAuth), h.Gateway.AntigravityModels) diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index ea53e97a0..87e48a53c 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -390,6 +390,126 @@ func classifyOpenAIWSReconnectReason(err error) (string, bool) { } } +func resolveOpenAIWSFallbackErrorResponse(err error) (statusCode int, errType string, clientMessage string, upstreamMessage string, ok bool) { + if err == nil { + return 0, "", "", "", false + } + var fallbackErr *openAIWSFallbackError + if !errors.As(err, &fallbackErr) || fallbackErr == nil { + return 0, "", "", "", false + } + + reason := strings.TrimSpace(fallbackErr.Reason) + reason = strings.TrimPrefix(reason, "prewarm_") + if reason == "" { + return 0, "", "", "", false + } + + var dialErr *openAIWSDialError + if fallbackErr.Err != nil && errors.As(fallbackErr.Err, &dialErr) && dialErr != nil { + if dialErr.StatusCode > 0 { + statusCode = dialErr.StatusCode + } + if dialErr.Err != nil { + upstreamMessage = sanitizeUpstreamErrorMessage(strings.TrimSpace(dialErr.Err.Error())) + } + } + + switch reason { + case "previous_response_not_found": + if statusCode == 0 { + statusCode = http.StatusBadRequest + } + errType = "invalid_request_error" + 
if upstreamMessage == "" { + upstreamMessage = "previous response not found" + } + case "upgrade_required": + if statusCode == 0 { + statusCode = http.StatusUpgradeRequired + } + case "ws_unsupported": + if statusCode == 0 { + statusCode = http.StatusBadRequest + } + case "auth_failed": + if statusCode == 0 { + statusCode = http.StatusUnauthorized + } + case "upstream_rate_limited": + if statusCode == 0 { + statusCode = http.StatusTooManyRequests + } + default: + if statusCode == 0 { + return 0, "", "", "", false + } + } + + if upstreamMessage == "" && fallbackErr.Err != nil { + upstreamMessage = sanitizeUpstreamErrorMessage(strings.TrimSpace(fallbackErr.Err.Error())) + } + if upstreamMessage == "" { + switch reason { + case "upgrade_required": + upstreamMessage = "upstream websocket upgrade required" + case "ws_unsupported": + upstreamMessage = "upstream websocket not supported" + case "auth_failed": + upstreamMessage = "upstream authentication failed" + case "upstream_rate_limited": + upstreamMessage = "upstream rate limit exceeded, please retry later" + default: + upstreamMessage = "Upstream request failed" + } + } + + if errType == "" { + if statusCode == http.StatusTooManyRequests { + errType = "rate_limit_error" + } else { + errType = "upstream_error" + } + } + clientMessage = upstreamMessage + return statusCode, errType, clientMessage, upstreamMessage, true +} + +func (s *OpenAIGatewayService) writeOpenAIWSFallbackErrorResponse(c *gin.Context, account *Account, wsErr error) bool { + if c == nil || c.Writer == nil || c.Writer.Written() { + return false + } + statusCode, errType, clientMessage, upstreamMessage, ok := resolveOpenAIWSFallbackErrorResponse(wsErr) + if !ok { + return false + } + if strings.TrimSpace(clientMessage) == "" { + clientMessage = "Upstream request failed" + } + if strings.TrimSpace(upstreamMessage) == "" { + upstreamMessage = clientMessage + } + + setOpsUpstreamError(c, statusCode, upstreamMessage, "") + if account != nil { + 
appendOpsUpstreamError(c, OpsUpstreamErrorEvent{ + Platform: account.Platform, + AccountID: account.ID, + AccountName: account.Name, + UpstreamStatusCode: statusCode, + Kind: "ws_error", + Message: upstreamMessage, + }) + } + c.JSON(statusCode, gin.H{ + "error": gin.H{ + "type": errType, + "message": clientMessage, + }, + }) + return true +} + func (s *OpenAIGatewayService) openAIWSRetryBackoff(attempt int) time.Duration { if attempt <= 0 { return 0 @@ -1551,6 +1671,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco ) return wsResult, nil } + s.writeOpenAIWSFallbackErrorResponse(c, account, wsErr) return nil, wsErr } diff --git a/backend/internal/service/openai_ws_fallback_test.go b/backend/internal/service/openai_ws_fallback_test.go index 0a386aaa5..ea20f098d 100644 --- a/backend/internal/service/openai_ws_fallback_test.go +++ b/backend/internal/service/openai_ws_fallback_test.go @@ -54,6 +54,38 @@ func TestOpenAIWSErrorHTTPStatus(t *testing.T) { require.Equal(t, http.StatusBadGateway, openAIWSErrorHTTPStatus([]byte(`{"type":"error","error":{"type":"server_error","code":"server_error","message":"server"}}`))) } +func TestResolveOpenAIWSFallbackErrorResponse(t *testing.T) { + t.Run("previous_response_not_found", func(t *testing.T) { + statusCode, errType, clientMessage, upstreamMessage, ok := resolveOpenAIWSFallbackErrorResponse( + wrapOpenAIWSFallback("previous_response_not_found", errors.New("previous response not found")), + ) + require.True(t, ok) + require.Equal(t, http.StatusBadRequest, statusCode) + require.Equal(t, "invalid_request_error", errType) + require.Equal(t, "previous response not found", clientMessage) + require.Equal(t, "previous response not found", upstreamMessage) + }) + + t.Run("auth_failed_uses_dial_status", func(t *testing.T) { + statusCode, errType, clientMessage, upstreamMessage, ok := resolveOpenAIWSFallbackErrorResponse( + wrapOpenAIWSFallback("auth_failed", &openAIWSDialError{ + StatusCode: 
http.StatusForbidden, + Err: errors.New("forbidden"), + }), + ) + require.True(t, ok) + require.Equal(t, http.StatusForbidden, statusCode) + require.Equal(t, "upstream_error", errType) + require.Equal(t, "forbidden", clientMessage) + require.Equal(t, "forbidden", upstreamMessage) + }) + + t.Run("non_fallback_error_not_resolved", func(t *testing.T) { + _, _, _, _, ok := resolveOpenAIWSFallbackErrorResponse(errors.New("plain error")) + require.False(t, ok) + }) +} + func TestOpenAIWSFallbackCooling(t *testing.T) { svc := &OpenAIGatewayService{cfg: &config.Config{}} svc.cfg.Gateway.OpenAIWS.FallbackCooldownSeconds = 1 diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index 46dcc1269..47ec38900 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -79,6 +79,59 @@ func wrapOpenAIWSFallback(reason string, err error) error { return &openAIWSFallbackError{Reason: strings.TrimSpace(reason), Err: err} } +// OpenAIWSClientCloseError 表示应以指定 WebSocket close code 主动关闭客户端连接的错误。 +type OpenAIWSClientCloseError struct { + statusCode coderws.StatusCode + reason string + err error +} + +// NewOpenAIWSClientCloseError 创建一个客户端 WS 关闭错误。 +func NewOpenAIWSClientCloseError(statusCode coderws.StatusCode, reason string, err error) error { + return &OpenAIWSClientCloseError{ + statusCode: statusCode, + reason: strings.TrimSpace(reason), + err: err, + } +} + +func (e *OpenAIWSClientCloseError) Error() string { + if e == nil { + return "" + } + if e.err == nil { + return fmt.Sprintf("openai ws client close: %d %s", int(e.statusCode), strings.TrimSpace(e.reason)) + } + return fmt.Sprintf("openai ws client close: %d %s: %v", int(e.statusCode), strings.TrimSpace(e.reason), e.err) +} + +func (e *OpenAIWSClientCloseError) Unwrap() error { + if e == nil { + return nil + } + return e.err +} + +func (e *OpenAIWSClientCloseError) StatusCode() coderws.StatusCode { + if e == nil { + 
return coderws.StatusInternalError + } + return e.statusCode +} + +func (e *OpenAIWSClientCloseError) Reason() string { + if e == nil { + return "" + } + return strings.TrimSpace(e.reason) +} + +// OpenAIWSIngressHooks 定义入站 WS 每个 turn 的生命周期回调。 +type OpenAIWSIngressHooks struct { + BeforeTurn func(turn int) error + AfterTurn func(turn int, result *OpenAIForwardResult, turnErr error) +} + func normalizeOpenAIWSLogValue(value string) string { trimmed := strings.TrimSpace(value) if trimmed == "" { @@ -435,11 +488,11 @@ func applyOpenAIWSRetryPayloadStrategy(payload map[string]any, attempt int) (str } func logOpenAIWSModeInfo(format string, args ...any) { - logger.LegacyPrintf("service.openai_gateway", "[OpenAI WS Mode] "+format, args...) + logger.LegacyPrintf("service.openai_gateway", "[OpenAI WS Mode][openai_ws_mode=true] "+format, args...) } func logOpenAIWSModeDebug(format string, args ...any) { - logger.LegacyPrintf("service.openai_gateway", "[debug] [OpenAI WS Mode] "+format, args...) + logger.LegacyPrintf("service.openai_gateway", "[debug] [OpenAI WS Mode][openai_ws_mode=true] "+format, args...) 
} func summarizeOpenAIWSReadCloseError(err error) (status string, reason string) { @@ -1408,6 +1461,308 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( }, nil } +// ProxyResponsesWebSocketFromClient 处理客户端入站 WebSocket(OpenAI Responses WS Mode)并转发到上游。 +// 当前实现按“单请求 -> 终止事件 -> 下一请求”的顺序代理,适配 Codex CLI 的 turn 模式。 +func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( + ctx context.Context, + c *gin.Context, + clientConn *coderws.Conn, + account *Account, + token string, + firstClientMessage []byte, + hooks *OpenAIWSIngressHooks, +) error { + if s == nil { + return errors.New("service is nil") + } + if c == nil { + return errors.New("gin context is nil") + } + if clientConn == nil { + return errors.New("client websocket is nil") + } + if account == nil { + return errors.New("account is nil") + } + if strings.TrimSpace(token) == "" { + return errors.New("token is empty") + } + + wsDecision := s.getOpenAIWSProtocolResolver().Resolve(account) + if wsDecision.Transport != OpenAIUpstreamTransportResponsesWebsocketV2 { + return fmt.Errorf("websocket ingress requires ws_v2 transport, got=%s", wsDecision.Transport) + } + + wsURL, err := s.buildOpenAIResponsesWSURL(account) + if err != nil { + return fmt.Errorf("build ws url: %w", err) + } + + parseClientPayload := func(raw []byte) (map[string]any, string, string, string, string, error) { + trimmed := strings.TrimSpace(string(raw)) + if trimmed == "" { + return nil, "", "", "", "", NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "empty websocket request payload", nil) + } + if !gjson.Valid(trimmed) { + return nil, "", "", "", "", NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", nil) + } + + payload := make(map[string]any) + if err := json.Unmarshal([]byte(trimmed), &payload); err != nil { + return nil, "", "", "", "", NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", err) + } + + eventType := 
openAIWSPayloadString(payload, "type") + if eventType == "" { + payload["type"] = "response.create" + eventType = "response.create" + } + if eventType != "response.create" { + if eventType == "response.append" { + return nil, "", "", "", "", NewOpenAIWSClientCloseError( + coderws.StatusPolicyViolation, + "response.append is not supported in ws v2; use response.create with previous_response_id", + nil, + ) + } + return nil, "", "", "", "", NewOpenAIWSClientCloseError( + coderws.StatusPolicyViolation, + fmt.Sprintf("unsupported websocket request type: %s", eventType), + nil, + ) + } + + if turnMetadata := strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)); turnMetadata != "" { + setOpenAIWSTurnMetadata(payload, turnMetadata) + } + + originalModel := strings.TrimSpace(openAIWSPayloadString(payload, "model")) + if originalModel == "" { + return nil, "", "", "", "", NewOpenAIWSClientCloseError( + coderws.StatusPolicyViolation, + "model is required in response.create payload", + nil, + ) + } + mappedModel := originalModel + mappedModel = account.GetMappedModel(originalModel) + if normalizedModel := normalizeCodexModel(mappedModel); normalizedModel != "" { + mappedModel = normalizedModel + } + if mappedModel != originalModel { + payload["model"] = mappedModel + } + + promptCacheKey := strings.TrimSpace(openAIWSPayloadString(payload, "prompt_cache_key")) + previousResponseID := strings.TrimSpace(openAIWSPayloadString(payload, "previous_response_id")) + return payload, eventType, promptCacheKey, previousResponseID, originalModel, nil + } + + firstPayload, _, firstPromptCacheKey, firstPreviousResponseID, firstOriginalModel, err := parseClientPayload(firstClientMessage) + if err != nil { + return err + } + + turnState := strings.TrimSpace(c.GetHeader(openAIWSTurnStateHeader)) + stateStore := s.getOpenAIWSStateStore() + groupID := getOpenAIGroupIDFromContext(c) + firstPayloadJSON := payloadAsJSONBytes(firstPayload) + sessionHash := s.GenerateSessionHash(c, 
firstPayloadJSON) + if turnState == "" && stateStore != nil && sessionHash != "" { + if savedTurnState, ok := stateStore.GetSessionTurnState(groupID, sessionHash); ok { + turnState = savedTurnState + } + } + + preferredConnID := "" + if stateStore != nil && firstPreviousResponseID != "" { + if connID, ok := stateStore.GetResponseConn(firstPreviousResponseID); ok { + preferredConnID = connID + } + } + + storeDisabled := s.isOpenAIWSStoreDisabledInRequest(firstPayload, account) + if stateStore != nil && storeDisabled && firstPreviousResponseID == "" && sessionHash != "" { + if connID, ok := stateStore.GetSessionConn(groupID, sessionHash); ok { + preferredConnID = connID + } + } + + isCodexCLI := openai.IsCodexCLIRequest(c.GetHeader("User-Agent")) || (s.cfg != nil && s.cfg.Gateway.ForceCodexCLI) + wsHeaders, _ := s.buildOpenAIWSHeaders(c, account, token, wsDecision, isCodexCLI, turnState, strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)), firstPromptCacheKey) + + logOpenAIWSModeDebug( + "ingress_ws_start account_id=%d account_type=%s transport=%s ws_host=%s preferred_conn_id=%s has_session_hash=%v has_previous_response_id=%v store_disabled=%v", + account.ID, + account.Type, + normalizeOpenAIWSLogValue(string(wsDecision.Transport)), + normalizeOpenAIWSLogValue(func() string { + if parsed, parseErr := url.Parse(wsURL); parseErr == nil && parsed != nil { + return parsed.Host + } + return "-" + }()), + truncateOpenAIWSLogValue(preferredConnID, openAIWSIDValueMaxLen), + sessionHash != "", + firstPreviousResponseID != "", + storeDisabled, + ) + + lease, err := s.getOpenAIWSConnPool().Acquire(ctx, openAIWSAcquireRequest{ + Account: account, + WSURL: wsURL, + Headers: wsHeaders, + ProxyURL: func() string { + if account.ProxyID != nil && account.Proxy != nil { + return account.Proxy.URL() + } + return "" + }(), + PreferredConnID: preferredConnID, + ForceNewConn: false, + }) + if err != nil { + return fmt.Errorf("acquire upstream websocket: %w", err) + } + defer 
lease.Release() + + if handshakeTurnState := strings.TrimSpace(lease.HandshakeHeader(openAIWSTurnStateHeader)); handshakeTurnState != "" { + if stateStore != nil && sessionHash != "" { + stateStore.BindSessionTurnState(groupID, sessionHash, handshakeTurnState, s.openAIWSSessionStickyTTL()) + } + } + + writeClientMessage := func(message []byte) error { + writeCtx, cancel := context.WithTimeout(ctx, s.openAIWSWriteTimeout()) + defer cancel() + return clientConn.Write(writeCtx, coderws.MessageText, message) + } + + readClientMessage := func() ([]byte, error) { + msgType, payload, readErr := clientConn.Read(ctx) + if readErr != nil { + return nil, readErr + } + if msgType != coderws.MessageText && msgType != coderws.MessageBinary { + return nil, NewOpenAIWSClientCloseError( + coderws.StatusPolicyViolation, + fmt.Sprintf("unsupported websocket client message type: %s", msgType.String()), + nil, + ) + } + return payload, nil + } + + sendAndRelay := func(payload map[string]any, originalModel string) (*OpenAIForwardResult, error) { + turnStart := time.Now() + if err := lease.WriteJSON(payload, s.openAIWSWriteTimeout()); err != nil { + return nil, fmt.Errorf("write upstream websocket request: %w", err) + } + + responseID := "" + usage := OpenAIUsage{} + var firstTokenMs *int + reqStream := true + if streamValue, ok := payload["stream"].(bool); ok { + reqStream = streamValue + } + for { + upstreamMessage, readErr := lease.ReadMessageWithContextTimeout(ctx, s.openAIWSReadTimeout()) + if readErr != nil { + lease.MarkBroken() + return nil, fmt.Errorf("read upstream websocket event: %w", readErr) + } + + if responseID == "" { + responseID = strings.TrimSpace(extractOpenAIWSResponseID(upstreamMessage)) + } + eventType := strings.TrimSpace(gjson.GetBytes(upstreamMessage, "type").String()) + if firstTokenMs == nil && isOpenAIWSTokenEvent(eventType) { + ms := int(time.Since(turnStart).Milliseconds()) + firstTokenMs = &ms + } + s.parseSSEUsageBytes(upstreamMessage, &usage) + + if 
originalModel != "" { + mappedModel := account.GetMappedModel(originalModel) + if normalizedModel := normalizeCodexModel(mappedModel); normalizedModel != "" { + mappedModel = normalizedModel + } + if mappedModel != "" && mappedModel != originalModel { + upstreamMessage = replaceOpenAIWSMessageModel(upstreamMessage, mappedModel, originalModel) + } + } + if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(upstreamMessage); changed { + upstreamMessage = corrected + } + if err := writeClientMessage(upstreamMessage); err != nil { + return nil, fmt.Errorf("write client websocket event: %w", err) + } + if isOpenAIWSTerminalEvent(eventType) { + return &OpenAIForwardResult{ + RequestID: responseID, + Usage: usage, + Model: originalModel, + ReasoningEffort: extractOpenAIReasoningEffort(payload, originalModel), + Stream: reqStream, + Duration: time.Since(turnStart), + FirstTokenMs: firstTokenMs, + }, nil + } + } + } + + currentPayload := firstPayload + currentOriginalModel := firstOriginalModel + turn := 1 + for { + if hooks != nil && hooks.BeforeTurn != nil { + if err := hooks.BeforeTurn(turn); err != nil { + return err + } + } + result, relayErr := sendAndRelay(currentPayload, currentOriginalModel) + if hooks != nil && hooks.AfterTurn != nil { + hooks.AfterTurn(turn, result, relayErr) + } + if relayErr != nil { + return relayErr + } + if result == nil { + return errors.New("websocket turn result is nil") + } + responseID := strings.TrimSpace(result.RequestID) + + if responseID != "" && stateStore != nil { + ttl := s.openAIWSResponseStickyTTL() + _ = stateStore.BindResponseAccount(ctx, groupID, responseID, account.ID, ttl) + stateStore.BindResponseConn(responseID, lease.ConnID(), ttl) + } + if stateStore != nil && storeDisabled && sessionHash != "" { + stateStore.BindSessionConn(groupID, sessionHash, lease.ConnID(), s.openAIWSSessionStickyTTL()) + } + + nextClientMessage, readErr := readClientMessage() + if readErr != nil { + closeStatus := 
coderws.CloseStatus(readErr) + switch closeStatus { + case coderws.StatusNormalClosure, coderws.StatusGoingAway, coderws.StatusNoStatusRcvd: + return nil + default: + return fmt.Errorf("read client websocket request: %w", readErr) + } + } + + nextPayload, _, _, _, nextOriginalModel, parseErr := parseClientPayload(nextClientMessage) + if parseErr != nil { + return parseErr + } + currentPayload = nextPayload + currentOriginalModel = nextOriginalModel + turn++ + } +} + func (s *OpenAIGatewayService) isOpenAIWSGeneratePrewarmEnabled() bool { return s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.PrewarmGenerateEnabled } diff --git a/backend/internal/service/openai_ws_protocol_forward_test.go b/backend/internal/service/openai_ws_protocol_forward_test.go index e4ec57f39..fe0b77b15 100644 --- a/backend/internal/service/openai_ws_protocol_forward_test.go +++ b/backend/internal/service/openai_ws_protocol_forward_test.go @@ -190,6 +190,8 @@ func TestOpenAIGatewayService_Forward_WSv2Dial426FallbackHTTP(t *testing.T) { require.Nil(t, result) require.Contains(t, err.Error(), "upgrade_required") require.Nil(t, upstream.lastReq, "WS 模式下不应再回退 HTTP") + require.Equal(t, http.StatusUpgradeRequired, rec.Code) + require.Contains(t, rec.Body.String(), "426") } func TestOpenAIGatewayService_Forward_WSv2FallbackCoolingSkipWS(t *testing.T) { @@ -819,4 +821,6 @@ func TestOpenAIGatewayService_Forward_WSv2PreviousResponseNotFoundDropsPreviousR require.Nil(t, result) require.Nil(t, upstream.lastReq, "previous_response_not_found 不应回退 HTTP") require.Equal(t, int32(1), wsAttempts.Load(), "previous_response_not_found 不应进行 WS 重试") + require.Equal(t, http.StatusBadRequest, rec.Code) + require.Contains(t, strings.ToLower(rec.Body.String()), "previous response not found") } diff --git a/backend/internal/service/openai_ws_state_store.go b/backend/internal/service/openai_ws_state_store.go index 349009458..c00c4054d 100644 --- a/backend/internal/service/openai_ws_state_store.go +++ 
b/backend/internal/service/openai_ws_state_store.go @@ -14,8 +14,6 @@ import ( const ( openAIWSResponseAccountCachePrefix = "openai:response:" openAIWSStateStoreCleanupInterval = time.Minute - // 分桶增量清理,避免在写锁下做全量 map 扫描。 - openAIWSStateStoreCleanupScanLimit = 128 ) type openAIWSAccountBinding struct { @@ -300,85 +298,54 @@ func (s *defaultOpenAIWSStateStore) maybeCleanup() { s.mu.Lock() defer s.mu.Unlock() - cleanupExpiredAccountBindings(s.responseToAccount, now, openAIWSStateStoreCleanupScanLimit) - cleanupExpiredConnBindings(s.responseToConn, now, openAIWSStateStoreCleanupScanLimit) - cleanupExpiredTurnStateBindings(s.sessionToTurnState, now, openAIWSStateStoreCleanupScanLimit) - cleanupExpiredSessionConnBindings(s.sessionToConn, now, openAIWSStateStoreCleanupScanLimit) + // 固定采样会在高流量场景下产生清理追不上写入的风险;这里按全量过期扫描兜底。 + cleanupExpiredAccountBindings(s.responseToAccount, now) + cleanupExpiredConnBindings(s.responseToConn, now) + cleanupExpiredTurnStateBindings(s.sessionToTurnState, now) + cleanupExpiredSessionConnBindings(s.sessionToConn, now) } -func cleanupExpiredAccountBindings(bindings map[string]openAIWSAccountBinding, now time.Time, limit int) { +func cleanupExpiredAccountBindings(bindings map[string]openAIWSAccountBinding, now time.Time) { if len(bindings) == 0 { return } - if limit <= 0 { - limit = len(bindings) - } - scanned := 0 for key, binding := range bindings { if now.After(binding.expiresAt) { delete(bindings, key) } - scanned++ - if scanned >= limit { - break - } } } -func cleanupExpiredConnBindings(bindings map[string]openAIWSConnBinding, now time.Time, limit int) { +func cleanupExpiredConnBindings(bindings map[string]openAIWSConnBinding, now time.Time) { if len(bindings) == 0 { return } - if limit <= 0 { - limit = len(bindings) - } - scanned := 0 for key, binding := range bindings { if now.After(binding.expiresAt) { delete(bindings, key) } - scanned++ - if scanned >= limit { - break - } } } -func cleanupExpiredTurnStateBindings(bindings 
map[string]openAIWSTurnStateBinding, now time.Time, limit int) { +func cleanupExpiredTurnStateBindings(bindings map[string]openAIWSTurnStateBinding, now time.Time) { if len(bindings) == 0 { return } - if limit <= 0 { - limit = len(bindings) - } - scanned := 0 for key, binding := range bindings { if now.After(binding.expiresAt) { delete(bindings, key) } - scanned++ - if scanned >= limit { - break - } } } -func cleanupExpiredSessionConnBindings(bindings map[string]openAIWSSessionConnBinding, now time.Time, limit int) { +func cleanupExpiredSessionConnBindings(bindings map[string]openAIWSSessionConnBinding, now time.Time) { if len(bindings) == 0 { return } - if limit <= 0 { - limit = len(bindings) - } - scanned := 0 for key, binding := range bindings { if now.After(binding.expiresAt) { delete(bindings, key) } - scanned++ - if scanned >= limit { - break - } } } diff --git a/backend/internal/service/openai_ws_state_store_test.go b/backend/internal/service/openai_ws_state_store_test.go index 5d6d5040d..4e8dc90fd 100644 --- a/backend/internal/service/openai_ws_state_store_test.go +++ b/backend/internal/service/openai_ws_state_store_test.go @@ -93,13 +93,13 @@ func TestOpenAIWSStateStore_GetResponseAccount_NoStaleAfterCacheMiss(t *testing. 
require.Zero(t, accountID, "上游缓存失效后不应继续命中本地陈旧映射") } -func TestOpenAIWSStateStore_MaybeCleanupIncremental(t *testing.T) { +func TestOpenAIWSStateStore_MaybeCleanupRemovesAllExpired(t *testing.T) { raw := NewOpenAIWSStateStore(nil) store, ok := raw.(*defaultOpenAIWSStateStore) require.True(t, ok) expiredAt := time.Now().Add(-time.Minute) - total := openAIWSStateStoreCleanupScanLimit * 2 + total := 2048 store.mu.Lock() for i := 0; i < total; i++ { store.responseToConn[fmt.Sprintf("resp_%d", i)] = openAIWSConnBinding{ @@ -115,15 +115,5 @@ func TestOpenAIWSStateStore_MaybeCleanupIncremental(t *testing.T) { store.mu.RLock() remaining := len(store.responseToConn) store.mu.RUnlock() - require.Greater(t, remaining, 0, "单次 cleanup 应为增量扫描") - require.Less(t, remaining, total, "单次 cleanup 应至少清理一部分过期键") - - for i := 0; i < 4 && remaining > 0; i++ { - store.lastCleanupUnixNano.Store(time.Now().Add(-2 * openAIWSStateStoreCleanupInterval).UnixNano()) - store.maybeCleanup() - store.mu.RLock() - remaining = len(store.responseToConn) - store.mu.RUnlock() - } - require.Zero(t, remaining, "多轮增量 cleanup 后应清空过期键") + require.Zero(t, remaining, "单轮 cleanup 应清空全部过期键,避免固定速率清理造成堆积") } From 6b204018c3a87c8068d35bb0996e113379c70459 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Wed, 25 Feb 2026 22:22:37 +0800 Subject: [PATCH 010/120] =?UTF-8?q?fix(openai-ws):=20=E8=AF=86=E5=88=AB?= =?UTF-8?q?=E5=AE=A2=E6=88=B7=E7=AB=AF=E6=AD=A3=E5=B8=B8=E6=96=AD=E8=BF=9E?= =?UTF-8?q?=E5=B9=B6=E6=B8=85=E7=90=86=E9=87=8D=E5=A4=8D=E6=97=A5=E5=BF=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../handler/openai_gateway_handler.go | 4 +- .../internal/service/openai_ws_forwarder.go | 37 ++++++++++++++--- .../openai_ws_forwarder_ingress_test.go | 41 +++++++++++++++++++ 3 files changed, 75 insertions(+), 7 deletions(-) create mode 100644 backend/internal/service/openai_ws_forwarder_ingress_test.go diff --git a/backend/internal/handler/openai_gateway_handler.go 
b/backend/internal/handler/openai_gateway_handler.go index cff764288..5b3aaf7b6 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -482,7 +482,7 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { if !h.ensureResponsesDependencies(c, reqLog) { return } - reqLog.Info("openai.websocket_ingress_started", zap.Bool("openai_ws_mode", true)) + reqLog.Info("openai.websocket_ingress_started") wsConn, err := coderws.Accept(c.Writer, c.Request, &coderws.AcceptOptions{ CompressionMode: coderws.CompressionContextTakeover, @@ -686,7 +686,7 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "upstream websocket proxy failed") return } - reqLog.Info("openai.websocket_ingress_closed", zap.Bool("openai_ws_mode", true), zap.Int64("account_id", account.ID)) + reqLog.Info("openai.websocket_ingress_closed", zap.Int64("account_id", account.ID)) } func (h *OpenAIGatewayHandler) recoverResponsesPanic(c *gin.Context, streamStarted *bool) { diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index 47ec38900..6d2aab1e4 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -5,7 +5,9 @@ import ( "encoding/json" "errors" "fmt" + "io" "math/rand" + "net" "net/http" "net/url" "sort" @@ -515,6 +517,26 @@ func summarizeOpenAIWSReadCloseError(err error) (status string, reason string) { return normalizeOpenAIWSLogValue(closeStatus), closeReason } +func isOpenAIWSClientDisconnectError(err error) bool { + if err == nil { + return false + } + if errors.Is(err, io.EOF) || errors.Is(err, net.ErrClosed) || errors.Is(err, context.Canceled) { + return true + } + switch coderws.CloseStatus(err) { + case coderws.StatusNormalClosure, coderws.StatusGoingAway, coderws.StatusNoStatusRcvd, coderws.StatusAbnormalClosure: + return 
true + } + message := strings.ToLower(strings.TrimSpace(err.Error())) + if message == "" { + return false + } + return strings.Contains(message, "failed to read frame header: eof") || + strings.Contains(message, "unexpected eof") || + strings.Contains(message, "use of closed network connection") +} + func classifyOpenAIWSReadFallbackReason(err error) string { if err == nil { return "read_event" @@ -1744,13 +1766,18 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( nextClientMessage, readErr := readClientMessage() if readErr != nil { - closeStatus := coderws.CloseStatus(readErr) - switch closeStatus { - case coderws.StatusNormalClosure, coderws.StatusGoingAway, coderws.StatusNoStatusRcvd: + if isOpenAIWSClientDisconnectError(readErr) { + closeStatus, closeReason := summarizeOpenAIWSReadCloseError(readErr) + logOpenAIWSModeInfo( + "ingress_ws_client_closed account_id=%d conn_id=%s close_status=%s close_reason=%s", + account.ID, + truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), + closeStatus, + truncateOpenAIWSLogValue(closeReason, openAIWSHeaderValueMaxLen), + ) return nil - default: - return fmt.Errorf("read client websocket request: %w", readErr) } + return fmt.Errorf("read client websocket request: %w", readErr) } nextPayload, _, _, _, nextOriginalModel, parseErr := parseClientPayload(nextClientMessage) diff --git a/backend/internal/service/openai_ws_forwarder_ingress_test.go b/backend/internal/service/openai_ws_forwarder_ingress_test.go new file mode 100644 index 000000000..bb5761878 --- /dev/null +++ b/backend/internal/service/openai_ws_forwarder_ingress_test.go @@ -0,0 +1,41 @@ +package service + +import ( + "context" + "errors" + "io" + "net" + "testing" + + coderws "github.com/coder/websocket" + "github.com/stretchr/testify/require" +) + +func TestIsOpenAIWSClientDisconnectError(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + err error + want bool + }{ + {name: "nil", err: nil, want: false}, + {name: 
"io_eof", err: io.EOF, want: true}, + {name: "net_closed", err: net.ErrClosed, want: true}, + {name: "context_canceled", err: context.Canceled, want: true}, + {name: "ws_normal_closure", err: coderws.CloseError{Code: coderws.StatusNormalClosure}, want: true}, + {name: "ws_going_away", err: coderws.CloseError{Code: coderws.StatusGoingAway}, want: true}, + {name: "ws_no_status", err: coderws.CloseError{Code: coderws.StatusNoStatusRcvd}, want: true}, + {name: "ws_abnormal_1006", err: coderws.CloseError{Code: coderws.StatusAbnormalClosure}, want: true}, + {name: "ws_policy_violation", err: coderws.CloseError{Code: coderws.StatusPolicyViolation}, want: false}, + {name: "wrapped_eof_message", err: errors.New("failed to get reader: failed to read frame header: EOF"), want: true}, + } + + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + require.Equal(t, tt.want, isOpenAIWSClientDisconnectError(tt.err)) + }) + } +} From 3490008b37f0cec94224bbb604f5cc925b1296a3 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Wed, 25 Feb 2026 23:03:42 +0800 Subject: [PATCH 011/120] =?UTF-8?q?perf(openai-ws):=20=E4=BC=98=E5=8C=96WS?= =?UTF-8?q?=20v2=E9=93=BE=E8=B7=AF=E5=B9=B6=E9=99=8D=E4=BD=8ETTFT=E9=95=BF?= =?UTF-8?q?=E5=B0=BE?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/config/config.go | 14 +- backend/internal/config/config_test.go | 27 +- .../internal/handler/admin/ops_ws_handler.go | 38 +- .../handler/openai_gateway_handler.go | 29 +- .../service/openai_account_scheduler.go | 16 +- .../service/openai_gateway_service.go | 17 +- .../internal/service/openai_ws_forwarder.go | 368 +++++++++++++----- backend/internal/service/openai_ws_pool.go | 59 ++- .../internal/service/openai_ws_pool_test.go | 2 - .../internal/service/openai_ws_state_store.go | 142 ++++--- .../service/openai_ws_state_store_test.go | 49 ++- deploy/config.example.yaml | 16 +- 12 files changed, 520 insertions(+), 257 
deletions(-) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 4b5c1729b..02a57d92b 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -1259,9 +1259,9 @@ func setDefaults() { viper.SetDefault("gateway.openai_ws.prewarm_generate_enabled", false) viper.SetDefault("gateway.openai_ws.responses_websockets", false) viper.SetDefault("gateway.openai_ws.responses_websockets_v2", true) - viper.SetDefault("gateway.openai_ws.max_conns_per_account", 8) - viper.SetDefault("gateway.openai_ws.min_idle_per_account", 1) - viper.SetDefault("gateway.openai_ws.max_idle_per_account", 4) + viper.SetDefault("gateway.openai_ws.max_conns_per_account", 24) + viper.SetDefault("gateway.openai_ws.min_idle_per_account", 4) + viper.SetDefault("gateway.openai_ws.max_idle_per_account", 12) viper.SetDefault("gateway.openai_ws.dynamic_max_conns_by_account_concurrency_enabled", true) viper.SetDefault("gateway.openai_ws.oauth_max_conns_factor", 1.0) viper.SetDefault("gateway.openai_ws.apikey_max_conns_factor", 1.0) @@ -1269,15 +1269,15 @@ func setDefaults() { viper.SetDefault("gateway.openai_ws.read_timeout_seconds", 900) viper.SetDefault("gateway.openai_ws.write_timeout_seconds", 120) viper.SetDefault("gateway.openai_ws.pool_target_utilization", 0.7) - viper.SetDefault("gateway.openai_ws.queue_limit_per_conn", 256) - viper.SetDefault("gateway.openai_ws.event_flush_batch_size", 4) - viper.SetDefault("gateway.openai_ws.event_flush_interval_ms", 25) + viper.SetDefault("gateway.openai_ws.queue_limit_per_conn", 64) + viper.SetDefault("gateway.openai_ws.event_flush_batch_size", 1) + viper.SetDefault("gateway.openai_ws.event_flush_interval_ms", 10) viper.SetDefault("gateway.openai_ws.prewarm_cooldown_ms", 300) viper.SetDefault("gateway.openai_ws.fallback_cooldown_seconds", 30) viper.SetDefault("gateway.openai_ws.retry_backoff_initial_ms", 120) viper.SetDefault("gateway.openai_ws.retry_backoff_max_ms", 2000) 
viper.SetDefault("gateway.openai_ws.retry_jitter_ratio", 0.2) - viper.SetDefault("gateway.openai_ws.retry_total_budget_ms", 0) + viper.SetDefault("gateway.openai_ws.retry_total_budget_ms", 5000) viper.SetDefault("gateway.openai_ws.payload_log_sample_rate", 0.2) viper.SetDefault("gateway.openai_ws.lb_top_k", 3) viper.SetDefault("gateway.openai_ws.sticky_session_ttl_seconds", 3600) diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go index e3db2b063..76026328c 100644 --- a/backend/internal/config/config_test.go +++ b/backend/internal/config/config_test.go @@ -111,11 +111,11 @@ func TestLoadDefaultOpenAIWSConfig(t *testing.T) { if cfg.Gateway.OpenAIWS.FallbackCooldownSeconds != 30 { t.Fatalf("Gateway.OpenAIWS.FallbackCooldownSeconds = %d, want 30", cfg.Gateway.OpenAIWS.FallbackCooldownSeconds) } - if cfg.Gateway.OpenAIWS.EventFlushBatchSize != 4 { - t.Fatalf("Gateway.OpenAIWS.EventFlushBatchSize = %d, want 4", cfg.Gateway.OpenAIWS.EventFlushBatchSize) + if cfg.Gateway.OpenAIWS.EventFlushBatchSize != 1 { + t.Fatalf("Gateway.OpenAIWS.EventFlushBatchSize = %d, want 1", cfg.Gateway.OpenAIWS.EventFlushBatchSize) } - if cfg.Gateway.OpenAIWS.EventFlushIntervalMS != 25 { - t.Fatalf("Gateway.OpenAIWS.EventFlushIntervalMS = %d, want 25", cfg.Gateway.OpenAIWS.EventFlushIntervalMS) + if cfg.Gateway.OpenAIWS.EventFlushIntervalMS != 10 { + t.Fatalf("Gateway.OpenAIWS.EventFlushIntervalMS = %d, want 10", cfg.Gateway.OpenAIWS.EventFlushIntervalMS) } if cfg.Gateway.OpenAIWS.PrewarmCooldownMS != 300 { t.Fatalf("Gateway.OpenAIWS.PrewarmCooldownMS = %d, want 300", cfg.Gateway.OpenAIWS.PrewarmCooldownMS) @@ -129,8 +129,8 @@ func TestLoadDefaultOpenAIWSConfig(t *testing.T) { if cfg.Gateway.OpenAIWS.RetryJitterRatio != 0.2 { t.Fatalf("Gateway.OpenAIWS.RetryJitterRatio = %v, want 0.2", cfg.Gateway.OpenAIWS.RetryJitterRatio) } - if cfg.Gateway.OpenAIWS.RetryTotalBudgetMS != 0 { - t.Fatalf("Gateway.OpenAIWS.RetryTotalBudgetMS = %d, want 0", 
cfg.Gateway.OpenAIWS.RetryTotalBudgetMS) + if cfg.Gateway.OpenAIWS.RetryTotalBudgetMS != 5000 { + t.Fatalf("Gateway.OpenAIWS.RetryTotalBudgetMS = %d, want 5000", cfg.Gateway.OpenAIWS.RetryTotalBudgetMS) } if cfg.Gateway.OpenAIWS.PayloadLogSampleRate != 0.2 { t.Fatalf("Gateway.OpenAIWS.PayloadLogSampleRate = %v, want 0.2", cfg.Gateway.OpenAIWS.PayloadLogSampleRate) @@ -1313,14 +1313,15 @@ func TestValidateConfig_OpenAIWSRules(t *testing.T) { }, wantErr: "gateway.openai_ws.min_idle_per_account must be <= max_idle_per_account", }, - { - name: "max_idle_per_account 不能大于 max_conns_per_account", - mutate: func(c *Config) { - c.Gateway.OpenAIWS.MaxConnsPerAccount = 2 - c.Gateway.OpenAIWS.MaxIdlePerAccount = 3 + { + name: "max_idle_per_account 不能大于 max_conns_per_account", + mutate: func(c *Config) { + c.Gateway.OpenAIWS.MaxConnsPerAccount = 2 + c.Gateway.OpenAIWS.MinIdlePerAccount = 1 + c.Gateway.OpenAIWS.MaxIdlePerAccount = 3 + }, + wantErr: "gateway.openai_ws.max_idle_per_account must be <= max_conns_per_account", }, - wantErr: "gateway.openai_ws.max_idle_per_account must be <= max_conns_per_account", - }, { name: "dial_timeout_seconds 必须为正数", mutate: func(c *Config) { c.Gateway.OpenAIWS.DialTimeoutSeconds = 0 }, diff --git a/backend/internal/handler/admin/ops_ws_handler.go b/backend/internal/handler/admin/ops_ws_handler.go index c030d3037..75fd7ea00 100644 --- a/backend/internal/handler/admin/ops_ws_handler.go +++ b/backend/internal/handler/admin/ops_ws_handler.go @@ -62,7 +62,8 @@ const ( ) var wsConnCount atomic.Int32 -var wsConnCountByIP sync.Map // map[string]*atomic.Int32 +var wsConnCountByIPMu sync.Mutex +var wsConnCountByIP = make(map[string]int32) const qpsWSIdleStopDelay = 30 * time.Second @@ -389,42 +390,31 @@ func tryAcquireOpsWSIPSlot(clientIP string, limit int32) bool { if strings.TrimSpace(clientIP) == "" || limit <= 0 { return true } - - v, _ := wsConnCountByIP.LoadOrStore(clientIP, &atomic.Int32{}) - counter, ok := v.(*atomic.Int32) - if !ok { + 
wsConnCountByIPMu.Lock() + defer wsConnCountByIPMu.Unlock() + current := wsConnCountByIP[clientIP] + if current >= limit { return false } - - for { - current := counter.Load() - if current >= limit { - return false - } - if counter.CompareAndSwap(current, current+1) { - return true - } - } + wsConnCountByIP[clientIP] = current + 1 + return true } func releaseOpsWSIPSlot(clientIP string) { if strings.TrimSpace(clientIP) == "" { return } - - v, ok := wsConnCountByIP.Load(clientIP) + wsConnCountByIPMu.Lock() + defer wsConnCountByIPMu.Unlock() + current, ok := wsConnCountByIP[clientIP] if !ok { return } - counter, ok := v.(*atomic.Int32) - if !ok { + if current <= 1 { + delete(wsConnCountByIP, clientIP) return } - next := counter.Add(-1) - if next <= 0 { - // Best-effort cleanup; safe even if a new slot was acquired concurrently. - wsConnCountByIP.Delete(clientIP) - } + wsConnCountByIP[clientIP] = current - 1 } func handleQPSWebSocket(parentCtx context.Context, conn *websocket.Conn) { diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 5b3aaf7b6..8d5810385 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -535,6 +535,20 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "too many concurrent requests, please retry later") return } + currentUserRelease := wrapReleaseOnDone(ctx, userReleaseFunc) + var currentAccountRelease func() + releaseTurnSlots := func() { + if currentAccountRelease != nil { + currentAccountRelease() + currentAccountRelease = nil + } + if currentUserRelease != nil { + currentUserRelease() + currentUserRelease = nil + } + } + defer releaseTurnSlots() + subscription, _ := middleware2.GetSubscriptionFromContext(c) if err := h.billingCacheService.CheckBillingEligibility(ctx, apiKey.User, apiKey, apiKey.Group, subscription); err != nil { 
reqLog.Info("openai.websocket_billing_eligibility_check_failed", zap.Error(err)) @@ -584,6 +598,7 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { } accountReleaseFunc = fastReleaseFunc } + currentAccountRelease = wrapReleaseOnDone(ctx, accountReleaseFunc) if err := h.gatewayService.BindStickySession(ctx, apiKey.GroupID, sessionHash, account.ID); err != nil { reqLog.Warn("openai.websocket_bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err)) } @@ -602,20 +617,6 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { zap.Int("candidate_count", scheduleDecision.CandidateCount), ) - currentUserRelease := wrapReleaseOnDone(ctx, userReleaseFunc) - currentAccountRelease := wrapReleaseOnDone(ctx, accountReleaseFunc) - releaseTurnSlots := func() { - if currentAccountRelease != nil { - currentAccountRelease() - currentAccountRelease = nil - } - if currentUserRelease != nil { - currentUserRelease() - currentUserRelease = nil - } - } - defer releaseTurnSlots() - userAgent := c.GetHeader("User-Agent") clientIP := ip.GetClientIP(c) hooks := &service.OpenAIWSIngressHooks{ diff --git a/backend/internal/service/openai_account_scheduler.go b/backend/internal/service/openai_account_scheduler.go index 25451e809..c8cf6888d 100644 --- a/backend/internal/service/openai_account_scheduler.go +++ b/backend/internal/service/openai_account_scheduler.go @@ -535,14 +535,14 @@ func (s *OpenAIGatewayService) getOpenAIAccountScheduler() OpenAIAccountSchedule if s == nil { return nil } - s.openaiWSInitMu.Lock() - defer s.openaiWSInitMu.Unlock() - if s.openaiAccountStats == nil { - s.openaiAccountStats = newOpenAIAccountRuntimeStats() - } - if s.openaiScheduler == nil { - s.openaiScheduler = newDefaultOpenAIAccountScheduler(s, s.openaiAccountStats) - } + s.openaiSchedulerOnce.Do(func() { + if s.openaiAccountStats == nil { + s.openaiAccountStats = newOpenAIAccountRuntimeStats() + } + if s.openaiScheduler == nil { + s.openaiScheduler = 
newDefaultOpenAIAccountScheduler(s, s.openaiAccountStats) + } + }) return s.openaiScheduler } diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 87e48a53c..54f00ab7a 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -247,14 +247,15 @@ type OpenAIGatewayService struct { toolCorrector *CodexToolCorrector openaiWSResolver OpenAIWSProtocolResolver - openaiWSInitMu sync.Mutex - openaiWSPool *openAIWSConnPool - openaiWSStateStore OpenAIWSStateStore - openaiScheduler OpenAIAccountScheduler - openaiAccountStats *openAIAccountRuntimeStats - - openaiWSFallbackMu sync.Mutex - openaiWSFallbackUntil map[int64]time.Time + openaiWSPoolOnce sync.Once + openaiWSStateStoreOnce sync.Once + openaiSchedulerOnce sync.Once + openaiWSPool *openAIWSConnPool + openaiWSStateStore OpenAIWSStateStore + openaiScheduler OpenAIAccountScheduler + openaiAccountStats *openAIAccountRuntimeStats + + openaiWSFallbackUntil sync.Map // key: int64(accountID), value: time.Time openaiWSRetryMetrics openAIWSRetryMetrics } diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index 6d2aab1e4..159195870 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -1,6 +1,7 @@ package service import ( + "bytes" "context" "encoding/json" "errors" @@ -233,6 +234,38 @@ func shouldLogOpenAIWSBufferedEvent(idx int) bool { return false } +func openAIWSEventMayContainModel(eventType string) bool { + switch strings.TrimSpace(eventType) { + case "response.created", + "response.in_progress", + "response.completed", + "response.done", + "response.failed", + "response.incomplete", + "response.cancelled", + "response.canceled": + return true + default: + return false + } +} + +func openAIWSEventMayContainToolCalls(eventType string) bool { + eventType = 
strings.TrimSpace(eventType) + if eventType == "" { + return false + } + if strings.Contains(eventType, "function_call") || strings.Contains(eventType, "tool_call") { + return true + } + switch eventType { + case "response.output_item.added", "response.output_item.done", "response.completed", "response.done": + return true + default: + return false + } +} + func summarizeOpenAIWSErrorEventFields(message []byte) (code string, errType string, errMessage string) { if len(message) == 0 { return "-", "-", "-" @@ -493,7 +526,14 @@ func logOpenAIWSModeInfo(format string, args ...any) { logger.LegacyPrintf("service.openai_gateway", "[OpenAI WS Mode][openai_ws_mode=true] "+format, args...) } +func isOpenAIWSModeDebugEnabled() bool { + return logger.L().Core().Enabled(zap.DebugLevel) +} + func logOpenAIWSModeDebug(format string, args ...any) { + if !isOpenAIWSModeDebugEnabled() { + return + } logger.LegacyPrintf("service.openai_gateway", "[debug] [OpenAI WS Mode][openai_ws_mode=true] "+format, args...) 
} @@ -567,11 +607,11 @@ func (s *OpenAIGatewayService) getOpenAIWSConnPool() *openAIWSConnPool { if s == nil { return nil } - s.openaiWSInitMu.Lock() - defer s.openaiWSInitMu.Unlock() - if s.openaiWSPool == nil { - s.openaiWSPool = newOpenAIWSConnPool(s.cfg) - } + s.openaiWSPoolOnce.Do(func() { + if s.openaiWSPool == nil { + s.openaiWSPool = newOpenAIWSConnPool(s.cfg) + } + }) return s.openaiWSPool } @@ -606,11 +646,11 @@ func (s *OpenAIGatewayService) getOpenAIWSStateStore() OpenAIWSStateStore { if s == nil { return nil } - s.openaiWSInitMu.Lock() - defer s.openaiWSInitMu.Unlock() - if s.openaiWSStateStore == nil { - s.openaiWSStateStore = NewOpenAIWSStateStore(s.cache) - } + s.openaiWSStateStoreOnce.Do(func() { + if s.openaiWSStateStore == nil { + s.openaiWSStateStore = NewOpenAIWSStateStore(s.cache) + } + }) return s.openaiWSStateStore } @@ -698,6 +738,16 @@ func (s *OpenAIGatewayService) openAIWSDialTimeout() time.Duration { return 10 * time.Second } +func (s *OpenAIGatewayService) openAIWSAcquireTimeout() time.Duration { + // Acquire 覆盖“连接复用命中/排队/新建连接”三个阶段。 + // 这里不再叠加 write_timeout,避免高并发排队时把 TTFT 长尾拉到分钟级。 + dial := s.openAIWSDialTimeout() + if dial <= 0 { + dial = 10 * time.Second + } + return dial + 2*time.Second +} + func (s *OpenAIGatewayService) buildOpenAIResponsesWSURL(account *Account) (string, error) { if account == nil { return "", errors.New("account is nil") @@ -1057,7 +1107,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( account.ProxyID != nil && account.Proxy != nil, ) - acquireCtx, acquireCancel := context.WithTimeout(ctx, s.openAIWSDialTimeout()+s.openAIWSWriteTimeout()) + acquireCtx, acquireCancel := context.WithTimeout(ctx, s.openAIWSAcquireTimeout()) defer acquireCancel() lease, err := s.getOpenAIWSConnPool().Acquire(acquireCtx, openAIWSAcquireRequest{ @@ -1197,6 +1247,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( } clientDisconnected := false + debugEnabled := isOpenAIWSModeDebugEnabled() flushBatchSize := 
s.openAIWSEventFlushBatchSize() flushInterval := s.openAIWSEventFlushInterval() pendingFlushEvents := 0 @@ -1218,7 +1269,13 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( if clientDisconnected { return } - if _, wErr := fmt.Fprintf(c.Writer, "data: %s\n\n", message); wErr == nil { + wErr := error(nil) + if _, wErr = io.WriteString(c.Writer, "data: "); wErr == nil { + if _, wErr = c.Writer.Write(message); wErr == nil { + _, wErr = io.WriteString(c.Writer, "\n\n") + } + } + if wErr == nil { wroteDownstream = true pendingFlushEvents++ flushStreamWriter(forceFlush) @@ -1238,15 +1295,17 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( bufferedStreamEvents = bufferedStreamEvents[:0] flushStreamWriter(true) flushedBufferedEventCount += flushed - logOpenAIWSModeDebug( - "buffer_flush account_id=%d conn_id=%s reason=%s flushed=%d total_flushed=%d client_disconnected=%v", - account.ID, - connID, - truncateOpenAIWSLogValue(reason, openAIWSLogValueMaxLen), - flushed, - flushedBufferedEventCount, - clientDisconnected, - ) + if debugEnabled { + logOpenAIWSModeDebug( + "buffer_flush account_id=%d conn_id=%s reason=%s flushed=%d total_flushed=%d client_disconnected=%v", + account.ID, + connID, + truncateOpenAIWSLogValue(reason, openAIWSLogValueMaxLen), + flushed, + flushedBufferedEventCount, + clientDisconnected, + ) + } } readTimeout := s.openAIWSReadTimeout() @@ -1308,7 +1367,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( ms := int(time.Since(startTime).Milliseconds()) firstTokenMs = &ms } - if shouldLogOpenAIWSEvent(eventCount, eventType) { + if debugEnabled && shouldLogOpenAIWSEvent(eventCount, eventType) { logOpenAIWSModeDebug( "event_received account_id=%d conn_id=%s idx=%d type=%s bytes=%d token=%v terminal=%v buffered_pending=%d", account.ID, @@ -1322,11 +1381,13 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( ) } - if needModelReplace { + if needModelReplace && openAIWSEventMayContainModel(eventType) { message = replaceOpenAIWSMessageModel(message, 
mappedModel, originalModel) } - if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(message); changed { - message = corrected + if openAIWSEventMayContainToolCalls(eventType) { + if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(message); changed { + message = corrected + } } s.parseSSEUsageBytes(message, usage) @@ -1349,6 +1410,8 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( errMessage, ) if !wroteDownstream && canFallback { + // 避免复用“已返回 error 且可能被上游关闭”的连接,防止下一轮重试空转 read_fail。 + lease.MarkBroken() return nil, wrapOpenAIWSFallback(fallbackReason, errors.New(errMsg)) } statusCode := openAIWSErrorHTTPStatus(message) @@ -1377,7 +1440,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( copy(buffered, message) bufferedStreamEvents = append(bufferedStreamEvents, buffered) bufferedEventCount++ - if shouldLogOpenAIWSBufferedEvent(bufferedEventCount) { + if debugEnabled && shouldLogOpenAIWSBufferedEvent(bufferedEventCount) { logOpenAIWSModeDebug( "buffer_enqueue account_id=%d conn_id=%s idx=%d event_idx=%d event_type=%s buffer_size=%d", account.ID, @@ -1519,18 +1582,25 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( if err != nil { return fmt.Errorf("build ws url: %w", err) } + wsHost := "-" + wsPath := "-" + if parsedURL, parseErr := url.Parse(wsURL); parseErr == nil && parsedURL != nil { + wsHost = normalizeOpenAIWSLogValue(parsedURL.Host) + wsPath = normalizeOpenAIWSLogValue(parsedURL.Path) + } + debugEnabled := isOpenAIWSModeDebugEnabled() parseClientPayload := func(raw []byte) (map[string]any, string, string, string, string, error) { - trimmed := strings.TrimSpace(string(raw)) - if trimmed == "" { + trimmed := bytes.TrimSpace(raw) + if len(trimmed) == 0 { return nil, "", "", "", "", NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "empty websocket request payload", nil) } - if !gjson.Valid(trimmed) { + if !gjson.ValidBytes(trimmed) { return nil, "", "", "", "", 
NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", nil) } payload := make(map[string]any) - if err := json.Unmarshal([]byte(trimmed), &payload); err != nil { + if err := json.Unmarshal(trimmed, &payload); err != nil { return nil, "", "", "", "", NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", err) } @@ -1612,25 +1682,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( isCodexCLI := openai.IsCodexCLIRequest(c.GetHeader("User-Agent")) || (s.cfg != nil && s.cfg.Gateway.ForceCodexCLI) wsHeaders, _ := s.buildOpenAIWSHeaders(c, account, token, wsDecision, isCodexCLI, turnState, strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)), firstPromptCacheKey) - - logOpenAIWSModeDebug( - "ingress_ws_start account_id=%d account_type=%s transport=%s ws_host=%s preferred_conn_id=%s has_session_hash=%v has_previous_response_id=%v store_disabled=%v", - account.ID, - account.Type, - normalizeOpenAIWSLogValue(string(wsDecision.Transport)), - normalizeOpenAIWSLogValue(func() string { - if parsed, parseErr := url.Parse(wsURL); parseErr == nil && parsed != nil { - return parsed.Host - } - return "-" - }()), - truncateOpenAIWSLogValue(preferredConnID, openAIWSIDValueMaxLen), - sessionHash != "", - firstPreviousResponseID != "", - storeDisabled, - ) - - lease, err := s.getOpenAIWSConnPool().Acquire(ctx, openAIWSAcquireRequest{ + baseAcquireReq := openAIWSAcquireRequest{ Account: account, WSURL: wsURL, Headers: wsHeaders, @@ -1640,18 +1692,84 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } return "" }(), - PreferredConnID: preferredConnID, - ForceNewConn: false, - }) - if err != nil { - return fmt.Errorf("acquire upstream websocket: %w", err) + ForceNewConn: false, + } + pool := s.getOpenAIWSConnPool() + if pool == nil { + return errors.New("openai ws conn pool is nil") } - defer lease.Release() - if handshakeTurnState := 
strings.TrimSpace(lease.HandshakeHeader(openAIWSTurnStateHeader)); handshakeTurnState != "" { - if stateStore != nil && sessionHash != "" { - stateStore.BindSessionTurnState(groupID, sessionHash, handshakeTurnState, s.openAIWSSessionStickyTTL()) + logOpenAIWSModeInfo( + "ingress_ws_protocol_confirm account_id=%d account_type=%s transport=%s ws_host=%s ws_path=%s store_disabled=%v has_session_hash=%v has_previous_response_id=%v", + account.ID, + account.Type, + normalizeOpenAIWSLogValue(string(wsDecision.Transport)), + wsHost, + wsPath, + storeDisabled, + sessionHash != "", + firstPreviousResponseID != "", + ) + + if debugEnabled { + logOpenAIWSModeDebug( + "ingress_ws_start account_id=%d account_type=%s transport=%s ws_host=%s preferred_conn_id=%s has_session_hash=%v has_previous_response_id=%v store_disabled=%v", + account.ID, + account.Type, + normalizeOpenAIWSLogValue(string(wsDecision.Transport)), + wsHost, + truncateOpenAIWSLogValue(preferredConnID, openAIWSIDValueMaxLen), + sessionHash != "", + firstPreviousResponseID != "", + storeDisabled, + ) + } + + acquireTimeout := s.openAIWSAcquireTimeout() + if acquireTimeout <= 0 { + acquireTimeout = 30 * time.Second + } + + acquireTurnLease := func(turn int, preferred string) (*openAIWSConnLease, error) { + req := cloneOpenAIWSAcquireRequest(baseAcquireReq) + req.PreferredConnID = strings.TrimSpace(preferred) + acquireCtx, acquireCancel := context.WithTimeout(ctx, acquireTimeout) + lease, acquireErr := pool.Acquire(acquireCtx, req) + acquireCancel() + if acquireErr != nil { + if errors.Is(acquireErr, context.DeadlineExceeded) || errors.Is(acquireErr, errOpenAIWSConnQueueFull) { + return nil, NewOpenAIWSClientCloseError( + coderws.StatusTryAgainLater, + "upstream websocket is busy, please retry later", + acquireErr, + ) + } + return nil, acquireErr } + connID := strings.TrimSpace(lease.ConnID()) + if handshakeTurnState := strings.TrimSpace(lease.HandshakeHeader(openAIWSTurnStateHeader)); handshakeTurnState != "" { + 
turnState = handshakeTurnState + if stateStore != nil && sessionHash != "" { + stateStore.BindSessionTurnState(groupID, sessionHash, handshakeTurnState, s.openAIWSSessionStickyTTL()) + } + updatedHeaders := cloneHeader(baseAcquireReq.Headers) + if updatedHeaders == nil { + updatedHeaders = make(http.Header) + } + updatedHeaders.Set(openAIWSTurnStateHeader, handshakeTurnState) + baseAcquireReq.Headers = updatedHeaders + } + logOpenAIWSModeInfo( + "ingress_ws_upstream_connected account_id=%d turn=%d conn_id=%s conn_reused=%v conn_pick_ms=%d queue_wait_ms=%d preferred_conn_id=%s", + account.ID, + turn, + truncateOpenAIWSLogValue(connID, openAIWSIDValueMaxLen), + lease.Reused(), + lease.ConnPickDuration().Milliseconds(), + lease.QueueWaitDuration().Milliseconds(), + truncateOpenAIWSLogValue(preferred, openAIWSIDValueMaxLen), + ) + return lease, nil } writeClientMessage := func(message []byte) error { @@ -1675,16 +1793,40 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return payload, nil } - sendAndRelay := func(payload map[string]any, originalModel string) (*OpenAIForwardResult, error) { + sendAndRelay := func(turn int, lease *openAIWSConnLease, payload map[string]any, originalModel string) (*OpenAIForwardResult, error) { + if lease == nil { + return nil, errors.New("upstream websocket lease is nil") + } turnStart := time.Now() if err := lease.WriteJSON(payload, s.openAIWSWriteTimeout()); err != nil { return nil, fmt.Errorf("write upstream websocket request: %w", err) } + logOpenAIWSModeInfo( + "ingress_ws_turn_request_sent account_id=%d turn=%d conn_id=%s payload_bytes=%d", + account.ID, + turn, + truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), + len(payloadAsJSONBytes(payload)), + ) responseID := "" usage := OpenAIUsage{} var firstTokenMs *int reqStream := true + eventCount := 0 + tokenEventCount := 0 + terminalEventCount := 0 + firstEventType := "" + lastEventType := "" + needModelReplace := false + mappedModel := "" + if 
originalModel != "" { + mappedModel = account.GetMappedModel(originalModel) + if normalizedModel := normalizeCodexModel(mappedModel); normalizedModel != "" { + mappedModel = normalizedModel + } + needModelReplace = mappedModel != "" && mappedModel != originalModel + } if streamValue, ok := payload["stream"].(bool); ok { reqStream = streamValue } @@ -1699,28 +1841,57 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( responseID = strings.TrimSpace(extractOpenAIWSResponseID(upstreamMessage)) } eventType := strings.TrimSpace(gjson.GetBytes(upstreamMessage, "type").String()) - if firstTokenMs == nil && isOpenAIWSTokenEvent(eventType) { + if eventType != "" { + eventCount++ + if firstEventType == "" { + firstEventType = eventType + } + lastEventType = eventType + } + isTokenEvent := isOpenAIWSTokenEvent(eventType) + if isTokenEvent { + tokenEventCount++ + } + isTerminalEvent := isOpenAIWSTerminalEvent(eventType) + if isTerminalEvent { + terminalEventCount++ + } + if firstTokenMs == nil && isTokenEvent { ms := int(time.Since(turnStart).Milliseconds()) firstTokenMs = &ms } s.parseSSEUsageBytes(upstreamMessage, &usage) - if originalModel != "" { - mappedModel := account.GetMappedModel(originalModel) - if normalizedModel := normalizeCodexModel(mappedModel); normalizedModel != "" { - mappedModel = normalizedModel - } - if mappedModel != "" && mappedModel != originalModel { - upstreamMessage = replaceOpenAIWSMessageModel(upstreamMessage, mappedModel, originalModel) - } + if needModelReplace && openAIWSEventMayContainModel(eventType) { + upstreamMessage = replaceOpenAIWSMessageModel(upstreamMessage, mappedModel, originalModel) } - if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(upstreamMessage); changed { - upstreamMessage = corrected + if openAIWSEventMayContainToolCalls(eventType) { + if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(upstreamMessage); changed { + upstreamMessage = corrected + } } if err := 
writeClientMessage(upstreamMessage); err != nil { return nil, fmt.Errorf("write client websocket event: %w", err) } - if isOpenAIWSTerminalEvent(eventType) { + if isTerminalEvent { + firstTokenMsValue := -1 + if firstTokenMs != nil { + firstTokenMsValue = *firstTokenMs + } + logOpenAIWSModeInfo( + "ingress_ws_turn_completed account_id=%d turn=%d conn_id=%s response_id=%s duration_ms=%d events=%d token_events=%d terminal_events=%d first_event=%s last_event=%s first_token_ms=%d", + account.ID, + turn, + truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(responseID, openAIWSIDValueMaxLen), + time.Since(turnStart).Milliseconds(), + eventCount, + tokenEventCount, + terminalEventCount, + truncateOpenAIWSLogValue(firstEventType, openAIWSLogValueMaxLen), + truncateOpenAIWSLogValue(lastEventType, openAIWSLogValueMaxLen), + firstTokenMsValue, + ) return &OpenAIForwardResult{ RequestID: responseID, Usage: usage, @@ -1743,7 +1914,13 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return err } } - result, relayErr := sendAndRelay(currentPayload, currentOriginalModel) + turnLease, acquireErr := acquireTurnLease(turn, preferredConnID) + if acquireErr != nil { + return fmt.Errorf("acquire upstream websocket: %w", acquireErr) + } + connID := strings.TrimSpace(turnLease.ConnID()) + result, relayErr := sendAndRelay(turn, turnLease, currentPayload, currentOriginalModel) + turnLease.Release() if hooks != nil && hooks.AfterTurn != nil { hooks.AfterTurn(turn, result, relayErr) } @@ -1758,10 +1935,13 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( if responseID != "" && stateStore != nil { ttl := s.openAIWSResponseStickyTTL() _ = stateStore.BindResponseAccount(ctx, groupID, responseID, account.ID, ttl) - stateStore.BindResponseConn(responseID, lease.ConnID(), ttl) + stateStore.BindResponseConn(responseID, connID, ttl) } if stateStore != nil && storeDisabled && sessionHash != "" { - 
stateStore.BindSessionConn(groupID, sessionHash, lease.ConnID(), s.openAIWSSessionStickyTTL()) + stateStore.BindSessionConn(groupID, sessionHash, connID, s.openAIWSSessionStickyTTL()) + } + if connID != "" { + preferredConnID = connID } nextClientMessage, readErr := readClientMessage() @@ -1771,7 +1951,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( logOpenAIWSModeInfo( "ingress_ws_client_closed account_id=%d conn_id=%s close_status=%s close_reason=%s", account.ID, - truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(connID, openAIWSIDValueMaxLen), closeStatus, truncateOpenAIWSLogValue(closeReason, openAIWSHeaderValueMaxLen), ) @@ -1780,12 +1960,22 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return fmt.Errorf("read client websocket request: %w", readErr) } - nextPayload, _, _, _, nextOriginalModel, parseErr := parseClientPayload(nextClientMessage) + nextPayload, _, nextPromptCacheKey, nextPreviousResponseID, nextOriginalModel, parseErr := parseClientPayload(nextClientMessage) if parseErr != nil { return parseErr } + if nextPromptCacheKey != "" { + updatedHeaders, _ := s.buildOpenAIWSHeaders(c, account, token, wsDecision, isCodexCLI, turnState, strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)), nextPromptCacheKey) + baseAcquireReq.Headers = updatedHeaders + } + if stateStore != nil && nextPreviousResponseID != "" { + if stickyConnID, ok := stateStore.GetResponseConn(nextPreviousResponseID); ok { + preferredConnID = stickyConnID + } + } currentPayload = nextPayload currentOriginalModel = nextOriginalModel + storeDisabled = s.isOpenAIWSStoreDisabledInRequest(currentPayload, account) turn++ } } @@ -2244,16 +2434,19 @@ func (s *OpenAIGatewayService) isOpenAIWSFallbackCooling(accountID int64) bool { if cooldown <= 0 { return false } - s.openaiWSFallbackMu.Lock() - defer s.openaiWSFallbackMu.Unlock() - until := s.openaiWSFallbackUntil[accountID] - if until.IsZero() { + 
rawUntil, ok := s.openaiWSFallbackUntil.Load(accountID) + if !ok || rawUntil == nil { + return false + } + until, ok := rawUntil.(time.Time) + if !ok || until.IsZero() { + s.openaiWSFallbackUntil.Delete(accountID) return false } if time.Now().Before(until) { return true } - delete(s.openaiWSFallbackUntil, accountID) + s.openaiWSFallbackUntil.Delete(accountID) return false } @@ -2265,21 +2458,12 @@ func (s *OpenAIGatewayService) markOpenAIWSFallbackCooling(accountID int64, _ st if cooldown <= 0 { return } - s.openaiWSFallbackMu.Lock() - if s.openaiWSFallbackUntil == nil { - s.openaiWSFallbackUntil = make(map[int64]time.Time, 32) - } - s.openaiWSFallbackUntil[accountID] = time.Now().Add(cooldown) - s.openaiWSFallbackMu.Unlock() + s.openaiWSFallbackUntil.Store(accountID, time.Now().Add(cooldown)) } func (s *OpenAIGatewayService) clearOpenAIWSFallbackCooling(accountID int64) { if s == nil || accountID <= 0 { return } - s.openaiWSFallbackMu.Lock() - if s.openaiWSFallbackUntil != nil { - delete(s.openaiWSFallbackUntil, accountID) - } - s.openaiWSFallbackMu.Unlock() + s.openaiWSFallbackUntil.Delete(accountID) } diff --git a/backend/internal/service/openai_ws_pool.go b/backend/internal/service/openai_ws_pool.go index 04a91ba1c..e6c0a6178 100644 --- a/backend/internal/service/openai_ws_pool.go +++ b/backend/internal/service/openai_ws_pool.go @@ -16,10 +16,11 @@ import ( ) const ( - openAIWSConnMaxAge = 60 * time.Minute - openAIWSConnHealthCheckIdle = 90 * time.Second - openAIWSConnHealthCheckTO = 2 * time.Second - openAIWSConnPrewarmExtraDelay = 2 * time.Second + openAIWSConnMaxAge = 60 * time.Minute + openAIWSConnHealthCheckIdle = 90 * time.Second + openAIWSConnHealthCheckTO = 2 * time.Second + openAIWSConnPrewarmExtraDelay = 2 * time.Second + openAIWSAcquireCleanupInterval = 3 * time.Second openAIWSPrewarmFailureWindow = 30 * time.Second openAIWSPrewarmFailureSuppress = 2 @@ -465,6 +466,7 @@ type openAIWSAccountPool struct { mu sync.Mutex conns map[string]*openAIWSConn 
creating int + lastCleanupAt time.Time lastAcquire *openAIWSAcquireRequest prewarmActive bool prewarmUntil time.Time @@ -501,8 +503,7 @@ type openAIWSConnPool struct { // 通过接口解耦底层 WS 客户端实现,默认使用 coder/websocket。 clientDialer openAIWSClientDialer - mu sync.RWMutex - accounts map[int64]*openAIWSAccountPool + accounts sync.Map // key: int64(accountID), value: *openAIWSAccountPool seq atomic.Uint64 metrics openAIWSPoolMetrics @@ -512,7 +513,6 @@ func newOpenAIWSConnPool(cfg *config.Config) *openAIWSConnPool { return &openAIWSConnPool{ cfg: cfg, clientDialer: newDefaultOpenAIWSClientDialer(), - accounts: make(map[int64]*openAIWSAccountPool), } } @@ -571,7 +571,11 @@ func (p *openAIWSConnPool) acquire(ctx context.Context, req openAIWSAcquireReque ap := p.getOrCreateAccountPool(accountID) ap.mu.Lock() ap.lastAcquire = cloneOpenAIWSAcquireRequestPtr(&req) - evicted = p.cleanupAccountLocked(ap, time.Now(), effectiveMaxConns) + now := time.Now() + if ap.lastCleanupAt.IsZero() || now.Sub(ap.lastCleanupAt) >= openAIWSAcquireCleanupInterval { + evicted = p.cleanupAccountLocked(ap, now, effectiveMaxConns) + ap.lastCleanupAt = now + } pickStartedAt := time.Now() allowReuse := !req.ForceNewConn @@ -775,39 +779,34 @@ func (p *openAIWSConnPool) getOrCreateAccountPool(accountID int64) *openAIWSAcco if p == nil || accountID <= 0 { return nil } - p.mu.Lock() - defer p.mu.Unlock() - ap, ok := p.accounts[accountID] - if ok && ap != nil { - return ap + if existing, ok := p.accounts.Load(accountID); ok { + if ap, typed := existing.(*openAIWSAccountPool); typed && ap != nil { + return ap + } + } + ap := &openAIWSAccountPool{conns: make(map[string]*openAIWSConn)} + actual, _ := p.accounts.LoadOrStore(accountID, ap) + if typed, ok := actual.(*openAIWSAccountPool); ok && typed != nil { + return typed } - ap = &openAIWSAccountPool{conns: make(map[string]*openAIWSConn)} - p.accounts[accountID] = ap return ap } -// ensureAccountPoolLocked 兼容旧调用:调用方需已持有 p.mu 锁。 +// ensureAccountPoolLocked 兼容旧调用。 
func (p *openAIWSConnPool) ensureAccountPoolLocked(accountID int64) *openAIWSAccountPool { - if p == nil || accountID <= 0 { - return nil - } - ap, ok := p.accounts[accountID] - if ok && ap != nil { - return ap - } - ap = &openAIWSAccountPool{conns: make(map[string]*openAIWSConn)} - p.accounts[accountID] = ap - return ap + return p.getOrCreateAccountPool(accountID) } func (p *openAIWSConnPool) getAccountPool(accountID int64) (*openAIWSAccountPool, bool) { if p == nil || accountID <= 0 { return nil, false } - p.mu.RLock() - defer p.mu.RUnlock() - ap, ok := p.accounts[accountID] - return ap, ok && ap != nil + value, ok := p.accounts.Load(accountID) + if !ok || value == nil { + return nil, false + } + ap, typed := value.(*openAIWSAccountPool) + return ap, typed && ap != nil } func (p *openAIWSConnPool) cleanupAccountLocked(ap *openAIWSAccountPool, now time.Time, maxConns int) []*openAIWSConn { diff --git a/backend/internal/service/openai_ws_pool_test.go b/backend/internal/service/openai_ws_pool_test.go index 73867f14d..0b6d517a1 100644 --- a/backend/internal/service/openai_ws_pool_test.go +++ b/backend/internal/service/openai_ws_pool_test.go @@ -241,7 +241,6 @@ func TestOpenAIWSConnPool_AcquireQueueWaitMetrics(t *testing.T) { conn := newOpenAIWSConn("busy", accountID, &openAIWSFakeConn{}, nil) require.True(t, conn.tryAcquire()) // 占用连接,触发后续排队 - pool.mu.Lock() ap := pool.ensureAccountPoolLocked(accountID) ap.mu.Lock() ap.conns[conn.id] = conn @@ -250,7 +249,6 @@ func TestOpenAIWSConnPool_AcquireQueueWaitMetrics(t *testing.T) { WSURL: "wss://example.com/v1/responses", } ap.mu.Unlock() - pool.mu.Unlock() go func() { time.Sleep(60 * time.Millisecond) diff --git a/backend/internal/service/openai_ws_state_store.go b/backend/internal/service/openai_ws_state_store.go index c00c4054d..e15ac5100 100644 --- a/backend/internal/service/openai_ws_state_store.go +++ b/backend/internal/service/openai_ws_state_store.go @@ -14,6 +14,8 @@ import ( const ( 
openAIWSResponseAccountCachePrefix = "openai:response:" openAIWSStateStoreCleanupInterval = time.Minute + openAIWSStateStoreCleanupMaxPerMap = 512 + openAIWSStateStoreMaxEntriesPerMap = 65536 ) type openAIWSAccountBinding struct { @@ -63,11 +65,14 @@ type OpenAIWSStateStore interface { type defaultOpenAIWSStateStore struct { cache GatewayCache - mu sync.RWMutex - responseToAccount map[string]openAIWSAccountBinding - responseToConn map[string]openAIWSConnBinding - sessionToTurnState map[string]openAIWSTurnStateBinding - sessionToConn map[string]openAIWSSessionConnBinding + responseToAccountMu sync.RWMutex + responseToAccount map[string]openAIWSAccountBinding + responseToConnMu sync.RWMutex + responseToConn map[string]openAIWSConnBinding + sessionToTurnStateMu sync.RWMutex + sessionToTurnState map[string]openAIWSTurnStateBinding + sessionToConnMu sync.RWMutex + sessionToConn map[string]openAIWSSessionConnBinding lastCleanupUnixNano atomic.Int64 } @@ -94,9 +99,10 @@ func (s *defaultOpenAIWSStateStore) BindResponseAccount(ctx context.Context, gro s.maybeCleanup() expiresAt := time.Now().Add(ttl) - s.mu.Lock() + s.responseToAccountMu.Lock() + ensureBindingCapacity(s.responseToAccount, id, openAIWSStateStoreMaxEntriesPerMap) s.responseToAccount[id] = openAIWSAccountBinding{accountID: accountID, expiresAt: expiresAt} - s.mu.Unlock() + s.responseToAccountMu.Unlock() if s.cache == nil { return nil @@ -113,15 +119,15 @@ func (s *defaultOpenAIWSStateStore) GetResponseAccount(ctx context.Context, grou s.maybeCleanup() now := time.Now() - s.mu.RLock() + s.responseToAccountMu.RLock() if binding, ok := s.responseToAccount[id]; ok { if now.Before(binding.expiresAt) { accountID := binding.accountID - s.mu.RUnlock() + s.responseToAccountMu.RUnlock() return accountID, nil } } - s.mu.RUnlock() + s.responseToAccountMu.RUnlock() if s.cache == nil { return 0, nil @@ -141,9 +147,9 @@ func (s *defaultOpenAIWSStateStore) DeleteResponseAccount(ctx context.Context, g if id == "" { return nil 
} - s.mu.Lock() + s.responseToAccountMu.Lock() delete(s.responseToAccount, id) - s.mu.Unlock() + s.responseToAccountMu.Unlock() if s.cache == nil { return nil @@ -160,12 +166,13 @@ func (s *defaultOpenAIWSStateStore) BindResponseConn(responseID, connID string, ttl = normalizeOpenAIWSTTL(ttl) s.maybeCleanup() - s.mu.Lock() + s.responseToConnMu.Lock() + ensureBindingCapacity(s.responseToConn, id, openAIWSStateStoreMaxEntriesPerMap) s.responseToConn[id] = openAIWSConnBinding{ connID: conn, expiresAt: time.Now().Add(ttl), } - s.mu.Unlock() + s.responseToConnMu.Unlock() } func (s *defaultOpenAIWSStateStore) GetResponseConn(responseID string) (string, bool) { @@ -176,9 +183,9 @@ func (s *defaultOpenAIWSStateStore) GetResponseConn(responseID string) (string, s.maybeCleanup() now := time.Now() - s.mu.RLock() + s.responseToConnMu.RLock() binding, ok := s.responseToConn[id] - s.mu.RUnlock() + s.responseToConnMu.RUnlock() if !ok || now.After(binding.expiresAt) || strings.TrimSpace(binding.connID) == "" { return "", false } @@ -190,9 +197,9 @@ func (s *defaultOpenAIWSStateStore) DeleteResponseConn(responseID string) { if id == "" { return } - s.mu.Lock() + s.responseToConnMu.Lock() delete(s.responseToConn, id) - s.mu.Unlock() + s.responseToConnMu.Unlock() } func (s *defaultOpenAIWSStateStore) BindSessionTurnState(groupID int64, sessionHash, turnState string, ttl time.Duration) { @@ -204,12 +211,13 @@ func (s *defaultOpenAIWSStateStore) BindSessionTurnState(groupID int64, sessionH ttl = normalizeOpenAIWSTTL(ttl) s.maybeCleanup() - s.mu.Lock() + s.sessionToTurnStateMu.Lock() + ensureBindingCapacity(s.sessionToTurnState, key, openAIWSStateStoreMaxEntriesPerMap) s.sessionToTurnState[key] = openAIWSTurnStateBinding{ turnState: state, expiresAt: time.Now().Add(ttl), } - s.mu.Unlock() + s.sessionToTurnStateMu.Unlock() } func (s *defaultOpenAIWSStateStore) GetSessionTurnState(groupID int64, sessionHash string) (string, bool) { @@ -220,9 +228,9 @@ func (s *defaultOpenAIWSStateStore) 
GetSessionTurnState(groupID int64, sessionHa s.maybeCleanup() now := time.Now() - s.mu.RLock() + s.sessionToTurnStateMu.RLock() binding, ok := s.sessionToTurnState[key] - s.mu.RUnlock() + s.sessionToTurnStateMu.RUnlock() if !ok || now.After(binding.expiresAt) || strings.TrimSpace(binding.turnState) == "" { return "", false } @@ -234,9 +242,9 @@ func (s *defaultOpenAIWSStateStore) DeleteSessionTurnState(groupID int64, sessio if key == "" { return } - s.mu.Lock() + s.sessionToTurnStateMu.Lock() delete(s.sessionToTurnState, key) - s.mu.Unlock() + s.sessionToTurnStateMu.Unlock() } func (s *defaultOpenAIWSStateStore) BindSessionConn(groupID int64, sessionHash, connID string, ttl time.Duration) { @@ -248,12 +256,13 @@ func (s *defaultOpenAIWSStateStore) BindSessionConn(groupID int64, sessionHash, ttl = normalizeOpenAIWSTTL(ttl) s.maybeCleanup() - s.mu.Lock() + s.sessionToConnMu.Lock() + ensureBindingCapacity(s.sessionToConn, key, openAIWSStateStoreMaxEntriesPerMap) s.sessionToConn[key] = openAIWSSessionConnBinding{ connID: conn, expiresAt: time.Now().Add(ttl), } - s.mu.Unlock() + s.sessionToConnMu.Unlock() } func (s *defaultOpenAIWSStateStore) GetSessionConn(groupID int64, sessionHash string) (string, bool) { @@ -264,9 +273,9 @@ func (s *defaultOpenAIWSStateStore) GetSessionConn(groupID int64, sessionHash st s.maybeCleanup() now := time.Now() - s.mu.RLock() + s.sessionToConnMu.RLock() binding, ok := s.sessionToConn[key] - s.mu.RUnlock() + s.sessionToConnMu.RUnlock() if !ok || now.After(binding.expiresAt) || strings.TrimSpace(binding.connID) == "" { return "", false } @@ -278,9 +287,9 @@ func (s *defaultOpenAIWSStateStore) DeleteSessionConn(groupID int64, sessionHash if key == "" { return } - s.mu.Lock() + s.sessionToConnMu.Lock() delete(s.sessionToConn, key) - s.mu.Unlock() + s.sessionToConnMu.Unlock() } func (s *defaultOpenAIWSStateStore) maybeCleanup() { @@ -296,56 +305,99 @@ func (s *defaultOpenAIWSStateStore) maybeCleanup() { return } - s.mu.Lock() - defer 
s.mu.Unlock() - // 固定采样会在高流量场景下产生清理追不上写入的风险;这里按全量过期扫描兜底。 - cleanupExpiredAccountBindings(s.responseToAccount, now) - cleanupExpiredConnBindings(s.responseToConn, now) - cleanupExpiredTurnStateBindings(s.sessionToTurnState, now) - cleanupExpiredSessionConnBindings(s.sessionToConn, now) + // 增量限额清理,避免高规模下一次性全量扫描导致长时间阻塞。 + s.responseToAccountMu.Lock() + cleanupExpiredAccountBindings(s.responseToAccount, now, openAIWSStateStoreCleanupMaxPerMap) + s.responseToAccountMu.Unlock() + + s.responseToConnMu.Lock() + cleanupExpiredConnBindings(s.responseToConn, now, openAIWSStateStoreCleanupMaxPerMap) + s.responseToConnMu.Unlock() + + s.sessionToTurnStateMu.Lock() + cleanupExpiredTurnStateBindings(s.sessionToTurnState, now, openAIWSStateStoreCleanupMaxPerMap) + s.sessionToTurnStateMu.Unlock() + + s.sessionToConnMu.Lock() + cleanupExpiredSessionConnBindings(s.sessionToConn, now, openAIWSStateStoreCleanupMaxPerMap) + s.sessionToConnMu.Unlock() } -func cleanupExpiredAccountBindings(bindings map[string]openAIWSAccountBinding, now time.Time) { - if len(bindings) == 0 { +func cleanupExpiredAccountBindings(bindings map[string]openAIWSAccountBinding, now time.Time, maxScan int) { + if len(bindings) == 0 || maxScan <= 0 { return } + scanned := 0 for key, binding := range bindings { if now.After(binding.expiresAt) { delete(bindings, key) } + scanned++ + if scanned >= maxScan { + break + } } } -func cleanupExpiredConnBindings(bindings map[string]openAIWSConnBinding, now time.Time) { - if len(bindings) == 0 { +func cleanupExpiredConnBindings(bindings map[string]openAIWSConnBinding, now time.Time, maxScan int) { + if len(bindings) == 0 || maxScan <= 0 { return } + scanned := 0 for key, binding := range bindings { if now.After(binding.expiresAt) { delete(bindings, key) } + scanned++ + if scanned >= maxScan { + break + } } } -func cleanupExpiredTurnStateBindings(bindings map[string]openAIWSTurnStateBinding, now time.Time) { - if len(bindings) == 0 { +func 
cleanupExpiredTurnStateBindings(bindings map[string]openAIWSTurnStateBinding, now time.Time, maxScan int) { + if len(bindings) == 0 || maxScan <= 0 { return } + scanned := 0 for key, binding := range bindings { if now.After(binding.expiresAt) { delete(bindings, key) } + scanned++ + if scanned >= maxScan { + break + } } } -func cleanupExpiredSessionConnBindings(bindings map[string]openAIWSSessionConnBinding, now time.Time) { - if len(bindings) == 0 { +func cleanupExpiredSessionConnBindings(bindings map[string]openAIWSSessionConnBinding, now time.Time, maxScan int) { + if len(bindings) == 0 || maxScan <= 0 { return } + scanned := 0 for key, binding := range bindings { if now.After(binding.expiresAt) { delete(bindings, key) } + scanned++ + if scanned >= maxScan { + break + } + } +} + +func ensureBindingCapacity[T any](bindings map[string]T, incomingKey string, maxEntries int) { + if len(bindings) < maxEntries || maxEntries <= 0 { + return + } + if _, exists := bindings[incomingKey]; exists { + return + } + // 固定上限保护:淘汰任意一项,优先保证内存有界。 + for key := range bindings { + delete(bindings, key) + return } } diff --git a/backend/internal/service/openai_ws_state_store_test.go b/backend/internal/service/openai_ws_state_store_test.go index 4e8dc90fd..17c54cac2 100644 --- a/backend/internal/service/openai_ws_state_store_test.go +++ b/backend/internal/service/openai_ws_state_store_test.go @@ -93,27 +93,64 @@ func TestOpenAIWSStateStore_GetResponseAccount_NoStaleAfterCacheMiss(t *testing. 
require.Zero(t, accountID, "上游缓存失效后不应继续命中本地陈旧映射") } -func TestOpenAIWSStateStore_MaybeCleanupRemovesAllExpired(t *testing.T) { +func TestOpenAIWSStateStore_MaybeCleanupRemovesExpiredIncrementally(t *testing.T) { raw := NewOpenAIWSStateStore(nil) store, ok := raw.(*defaultOpenAIWSStateStore) require.True(t, ok) expiredAt := time.Now().Add(-time.Minute) total := 2048 - store.mu.Lock() + store.responseToConnMu.Lock() for i := 0; i < total; i++ { store.responseToConn[fmt.Sprintf("resp_%d", i)] = openAIWSConnBinding{ connID: "conn_incremental", expiresAt: expiredAt, } } - store.mu.Unlock() + store.responseToConnMu.Unlock() store.lastCleanupUnixNano.Store(time.Now().Add(-2 * openAIWSStateStoreCleanupInterval).UnixNano()) store.maybeCleanup() - store.mu.RLock() + store.responseToConnMu.RLock() + remainingAfterFirst := len(store.responseToConn) + store.responseToConnMu.RUnlock() + require.Less(t, remainingAfterFirst, total, "单轮 cleanup 应至少有进展") + require.Greater(t, remainingAfterFirst, 0, "增量清理不要求单轮清空全部键") + + for i := 0; i < 8; i++ { + store.lastCleanupUnixNano.Store(time.Now().Add(-2 * openAIWSStateStoreCleanupInterval).UnixNano()) + store.maybeCleanup() + } + + store.responseToConnMu.RLock() remaining := len(store.responseToConn) - store.mu.RUnlock() - require.Zero(t, remaining, "单轮 cleanup 应清空全部过期键,避免固定速率清理造成堆积") + store.responseToConnMu.RUnlock() + require.Zero(t, remaining, "多轮 cleanup 后应逐步清空全部过期键") +} + +func TestEnsureBindingCapacity_EvictsOneWhenMapIsFull(t *testing.T) { + bindings := map[string]int{ + "a": 1, + "b": 2, + } + + ensureBindingCapacity(bindings, "c", 2) + bindings["c"] = 3 + + require.Len(t, bindings, 2) + require.Equal(t, 3, bindings["c"]) +} + +func TestEnsureBindingCapacity_DoesNotEvictWhenUpdatingExistingKey(t *testing.T) { + bindings := map[string]int{ + "a": 1, + "b": 2, + } + + ensureBindingCapacity(bindings, "a", 2) + bindings["a"] = 9 + + require.Len(t, bindings, 2) + require.Equal(t, 9, bindings["a"]) } diff --git 
a/deploy/config.example.yaml b/deploy/config.example.yaml index cc9ae7d69..a35ab27d0 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -222,9 +222,9 @@ gateway: responses_websockets: false responses_websockets_v2: true # 连接池参数(按账号池化复用) - max_conns_per_account: 8 - min_idle_per_account: 1 - max_idle_per_account: 4 + max_conns_per_account: 24 + min_idle_per_account: 4 + max_idle_per_account: 12 # 是否按账号并发动态计算连接池上限: # effective_max_conns = min(max_conns_per_account, ceil(account.concurrency * factor)) dynamic_max_conns_by_account_concurrency_enabled: true @@ -235,10 +235,10 @@ gateway: read_timeout_seconds: 900 write_timeout_seconds: 120 pool_target_utilization: 0.7 - queue_limit_per_conn: 256 + queue_limit_per_conn: 64 # 流式写出批量 flush 参数 - event_flush_batch_size: 4 - event_flush_interval_ms: 25 + event_flush_batch_size: 1 + event_flush_interval_ms: 10 # 预热触发冷却(毫秒) prewarm_cooldown_ms: 300 # WS 回退到 HTTP 后的冷却时间(秒),用于避免 WS/HTTP 来回抖动;0 表示关闭冷却 @@ -248,8 +248,8 @@ gateway: retry_backoff_max_ms: 2000 # 抖动比例(0-1) retry_jitter_ratio: 0.2 - # 单次请求 WS 重试总预算(毫秒);0 表示关闭预算限制,不因预算回退 HTTP - retry_total_budget_ms: 0 + # 单次请求 WS 重试总预算(毫秒);建议设置为有限值,避免重试拉高 TTFT 长尾 + retry_total_budget_ms: 5000 # payload_schema 日志采样率(0-1);降低热路径日志放大 payload_log_sample_rate: 0.2 # 调度与粘连参数 From e957bb369393c96703dd85c5d59c3a536d587f84 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 08:43:13 +0800 Subject: [PATCH 012/120] =?UTF-8?q?fix(openai-ws):=20=E4=BF=AE=E5=A4=8Ding?= =?UTF-8?q?ress=E4=BC=9A=E8=AF=9Dturn=E9=97=B4=E9=87=8A=E6=94=BElease?= =?UTF-8?q?=E5=AF=BC=E8=87=B4=E4=B8=B2=E6=89=B0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../internal/service/openai_ws_forwarder.go | 49 +++++- ...penai_ws_forwarder_ingress_session_test.go | 149 ++++++++++++++++++ 2 files changed, 191 insertions(+), 7 deletions(-) create mode 100644 backend/internal/service/openai_ws_forwarder_ingress_session_test.go diff --git 
a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index 159195870..afbe9c8a7 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -1907,6 +1907,23 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( currentPayload := firstPayload currentOriginalModel := firstOriginalModel + var sessionLease *openAIWSConnLease + sessionConnID := "" + releaseSessionLease := func() { + if sessionLease == nil { + return + } + sessionLease.Release() + if debugEnabled { + logOpenAIWSModeDebug( + "ingress_ws_upstream_released account_id=%d conn_id=%s", + account.ID, + truncateOpenAIWSLogValue(sessionConnID, openAIWSIDValueMaxLen), + ) + } + } + defer releaseSessionLease() + turn := 1 for { if hooks != nil && hooks.BeforeTurn != nil { @@ -1914,17 +1931,22 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return err } } - turnLease, acquireErr := acquireTurnLease(turn, preferredConnID) - if acquireErr != nil { - return fmt.Errorf("acquire upstream websocket: %w", acquireErr) + if sessionLease == nil { + acquiredLease, acquireErr := acquireTurnLease(turn, preferredConnID) + if acquireErr != nil { + return fmt.Errorf("acquire upstream websocket: %w", acquireErr) + } + sessionLease = acquiredLease + sessionConnID = strings.TrimSpace(sessionLease.ConnID()) } - connID := strings.TrimSpace(turnLease.ConnID()) - result, relayErr := sendAndRelay(turn, turnLease, currentPayload, currentOriginalModel) - turnLease.Release() + connID := sessionConnID + + result, relayErr := sendAndRelay(turn, sessionLease, currentPayload, currentOriginalModel) if hooks != nil && hooks.AfterTurn != nil { hooks.AfterTurn(turn, result, relayErr) } if relayErr != nil { + sessionLease.MarkBroken() return relayErr } if result == nil { @@ -1965,12 +1987,25 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return parseErr } if nextPromptCacheKey != "" { + // 
ingress 会话在整个客户端 WS 生命周期内复用同一上游连接; + // prompt_cache_key 对握手头的更新仅在未来需要重新建连时生效。 updatedHeaders, _ := s.buildOpenAIWSHeaders(c, account, token, wsDecision, isCodexCLI, turnState, strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)), nextPromptCacheKey) baseAcquireReq.Headers = updatedHeaders } if stateStore != nil && nextPreviousResponseID != "" { if stickyConnID, ok := stateStore.GetResponseConn(nextPreviousResponseID); ok { - preferredConnID = stickyConnID + if sessionConnID != "" && stickyConnID != "" && stickyConnID != sessionConnID { + logOpenAIWSModeInfo( + "ingress_ws_keep_session_conn account_id=%d turn=%d conn_id=%s sticky_conn_id=%s previous_response_id=%s", + account.ID, + turn, + truncateOpenAIWSLogValue(sessionConnID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(stickyConnID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(nextPreviousResponseID, openAIWSIDValueMaxLen), + ) + } else { + preferredConnID = stickyConnID + } } } currentPayload = nextPayload diff --git a/backend/internal/service/openai_ws_forwarder_ingress_session_test.go b/backend/internal/service/openai_ws_forwarder_ingress_session_test.go new file mode 100644 index 000000000..99227b8f0 --- /dev/null +++ b/backend/internal/service/openai_ws_forwarder_ingress_session_test.go @@ -0,0 +1,149 @@ +package service + +import ( + "context" + "errors" + "net/http" + "net/http/httptest" + "strings" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + coderws "github.com/coder/websocket" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" + "github.com/tidwall/gjson" +) + +func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_KeepLeaseAcrossTurns(t *testing.T) { + gin.SetMode(gin.TestMode) + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + 
cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + cfg.Gateway.OpenAIWS.QueueLimitPerConn = 8 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.ReadTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.WriteTimeoutSeconds = 3 + + captureConn := &openAIWSCaptureConn{ + events: [][]byte{ + []byte(`{"type":"response.completed","response":{"id":"resp_ingress_turn_1","model":"gpt-5.1","usage":{"input_tokens":1,"output_tokens":1}}}`), + []byte(`{"type":"response.completed","response":{"id":"resp_ingress_turn_2","model":"gpt-5.1","usage":{"input_tokens":1,"output_tokens":1}}}`), + }, + } + captureDialer := &openAIWSCaptureDialer{conn: captureConn} + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(captureDialer) + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + openaiWSPool: pool, + } + + account := &Account{ + ID: 114, + Name: "openai-ingress-session-lease", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + serverErrCh := make(chan error, 1) + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, err := coderws.Accept(w, r, &coderws.AcceptOptions{ + CompressionMode: coderws.CompressionContextTakeover, + }) + if err != nil { + serverErrCh <- err + return + } + defer conn.CloseNow() + + rec := httptest.NewRecorder() + ginCtx, _ := gin.CreateTestContext(rec) + req := r.Clone(r.Context()) + req.Header = req.Header.Clone() + req.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + ginCtx.Request = req + + 
readCtx, cancel := context.WithTimeout(r.Context(), 3*time.Second) + msgType, firstMessage, readErr := conn.Read(readCtx) + cancel() + if readErr != nil { + serverErrCh <- readErr + return + } + if msgType != coderws.MessageText && msgType != coderws.MessageBinary { + serverErrCh <- errors.New("unsupported websocket client message type") + return + } + + serverErrCh <- svc.ProxyResponsesWebSocketFromClient(r.Context(), ginCtx, conn, account, "sk-test", firstMessage, nil) + })) + defer wsServer.Close() + + dialCtx, cancelDial := context.WithTimeout(context.Background(), 3*time.Second) + clientConn, _, err := coderws.Dial(dialCtx, "ws"+strings.TrimPrefix(wsServer.URL, "http"), nil) + cancelDial() + require.NoError(t, err) + defer clientConn.CloseNow() + + writeMessage := func(payload string) { + writeCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + require.NoError(t, clientConn.Write(writeCtx, coderws.MessageText, []byte(payload))) + } + readMessage := func() []byte { + readCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + msgType, message, readErr := clientConn.Read(readCtx) + require.NoError(t, readErr) + require.Equal(t, coderws.MessageText, msgType) + return message + } + + writeMessage(`{"type":"response.create","model":"gpt-5.1","stream":false}`) + firstTurnEvent := readMessage() + require.Equal(t, "response.completed", gjson.GetBytes(firstTurnEvent, "type").String()) + require.Equal(t, "resp_ingress_turn_1", gjson.GetBytes(firstTurnEvent, "response.id").String()) + + writeMessage(`{"type":"response.create","model":"gpt-5.1","stream":false,"previous_response_id":"resp_ingress_turn_1"}`) + secondTurnEvent := readMessage() + require.Equal(t, "response.completed", gjson.GetBytes(secondTurnEvent, "type").String()) + require.Equal(t, "resp_ingress_turn_2", gjson.GetBytes(secondTurnEvent, "response.id").String()) + + require.NoError(t, clientConn.Close(coderws.StatusNormalClosure, 
"done")) + + select { + case serverErr := <-serverErrCh: + require.NoError(t, serverErr) + case <-time.After(5 * time.Second): + t.Fatal("等待 ingress websocket 结束超时") + } + + metrics := svc.SnapshotOpenAIWSPoolMetrics() + require.Equal(t, int64(1), metrics.AcquireTotal, "同一 ingress 会话多 turn 应只获取一次上游 lease") + require.Equal(t, 1, captureDialer.DialCount(), "同一 ingress 会话应保持同一上游连接") + require.Len(t, captureConn.writes, 2, "应向同一上游连接发送两轮 response.create") +} From 5947215c6fb0943bd52a69cfc9e04a5927171fa1 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 09:28:39 +0800 Subject: [PATCH 013/120] =?UTF-8?q?feat(ws):=20=E5=AE=8C=E6=88=90=20OpenAI?= =?UTF-8?q?=20WS=20Mode=20=E5=85=A8=E9=93=BE=E8=B7=AF=E8=90=BD=E5=BA=93?= =?UTF-8?q?=E4=B8=8E=E7=B1=BB=E5=9E=8B=E5=B1=95=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/handler/dto/mappers.go | 1 + .../handler/dto/mappers_usage_test.go | 28 ++++ backend/internal/handler/dto/types.go | 1 + .../handler/openai_gateway_handler.go | 27 +-- .../migrations_schema_integration_test.go | 1 + backend/internal/repository/usage_log_repo.go | 9 +- .../usage_log_repo_integration_test.go | 26 +++ .../service/openai_gateway_service.go | 4 + .../internal/service/openai_ws_forwarder.go | 157 +++++++++++------- backend/internal/service/usage_log.go | 1 + .../060_add_usage_log_openai_ws_mode.sql | 2 + .../src/components/admin/usage/UsageTable.vue | 16 +- frontend/src/i18n/locales/en.ts | 1 + frontend/src/i18n/locales/zh.ts | 1 + frontend/src/types/index.ts | 1 + frontend/src/views/admin/UsageView.vue | 6 +- frontend/src/views/user/UsageView.vue | 22 ++- 17 files changed, 222 insertions(+), 82 deletions(-) create mode 100644 backend/internal/handler/dto/mappers_usage_test.go create mode 100644 backend/migrations/060_add_usage_log_openai_ws_mode.sql diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go index 
42ff4a843..e399e7b99 100644 --- a/backend/internal/handler/dto/mappers.go +++ b/backend/internal/handler/dto/mappers.go @@ -410,6 +410,7 @@ func usageLogFromServiceUser(l *service.UsageLog) UsageLog { RateMultiplier: l.RateMultiplier, BillingType: l.BillingType, Stream: l.Stream, + OpenAIWSMode: l.OpenAIWSMode, DurationMs: l.DurationMs, FirstTokenMs: l.FirstTokenMs, ImageCount: l.ImageCount, diff --git a/backend/internal/handler/dto/mappers_usage_test.go b/backend/internal/handler/dto/mappers_usage_test.go new file mode 100644 index 000000000..22d910001 --- /dev/null +++ b/backend/internal/handler/dto/mappers_usage_test.go @@ -0,0 +1,28 @@ +package dto + +import ( + "testing" + + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func TestUsageLogFromService_IncludesOpenAIWSMode(t *testing.T) { + t.Parallel() + + wsLog := &service.UsageLog{ + RequestID: "req_1", + Model: "gpt-5.3-codex", + OpenAIWSMode: true, + } + httpLog := &service.UsageLog{ + RequestID: "resp_1", + Model: "gpt-5.3-codex", + OpenAIWSMode: false, + } + + require.True(t, UsageLogFromService(wsLog).OpenAIWSMode) + require.False(t, UsageLogFromService(httpLog).OpenAIWSMode) + require.True(t, UsageLogFromServiceAdmin(wsLog).OpenAIWSMode) + require.False(t, UsageLogFromServiceAdmin(httpLog).OpenAIWSMode) +} diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index 0cd1b2413..c95a84899 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -280,6 +280,7 @@ type UsageLog struct { BillingType int8 `json:"billing_type"` Stream bool `json:"stream"` + OpenAIWSMode bool `json:"openai_ws_mode"` DurationMs *int `json:"duration_ms"` FirstTokenMs *int `json:"first_token_ms"` diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 8d5810385..b039d9f6f 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ 
b/backend/internal/handler/openai_gateway_handler.go @@ -525,17 +525,7 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { ) setOpsRequestContext(c, reqModel, true, firstMessage) - userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(ctx, subject.UserID, subject.Concurrency) - if err != nil { - reqLog.Warn("openai.websocket_user_slot_acquire_failed", zap.Error(err)) - closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "failed to acquire user concurrency slot") - return - } - if !userAcquired { - closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "too many concurrent requests, please retry later") - return - } - currentUserRelease := wrapReleaseOnDone(ctx, userReleaseFunc) + var currentUserRelease func() var currentAccountRelease func() releaseTurnSlots := func() { if currentAccountRelease != nil { @@ -547,8 +537,21 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { currentUserRelease = nil } } + // 必须尽早注册,确保任何 early return 都能释放已获取的并发槽位。 defer releaseTurnSlots() + userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(ctx, subject.UserID, subject.Concurrency) + if err != nil { + reqLog.Warn("openai.websocket_user_slot_acquire_failed", zap.Error(err)) + closeOpenAIClientWS(wsConn, coderws.StatusInternalError, "failed to acquire user concurrency slot") + return + } + if !userAcquired { + closeOpenAIClientWS(wsConn, coderws.StatusTryAgainLater, "too many concurrent requests, please retry later") + return + } + currentUserRelease = wrapReleaseOnDone(ctx, userReleaseFunc) + subscription, _ := middleware2.GetSubscriptionFromContext(c) if err := h.billingCacheService.CheckBillingEligibility(ctx, apiKey.User, apiKey, apiKey.Group, subscription); err != nil { reqLog.Info("openai.websocket_billing_eligibility_check_failed", zap.Error(err)) @@ -624,6 +627,8 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { if turn == 1 { return nil } + // 
防御式清理:避免异常路径下旧槽位覆盖导致泄漏。 + releaseTurnSlots() // 非首轮 turn 需要重新抢占并发槽位,避免长连接空闲占槽。 userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(ctx, subject.UserID, subject.Concurrency) if err != nil { diff --git a/backend/internal/repository/migrations_schema_integration_test.go b/backend/internal/repository/migrations_schema_integration_test.go index f50d2b26d..e94aa9054 100644 --- a/backend/internal/repository/migrations_schema_integration_test.go +++ b/backend/internal/repository/migrations_schema_integration_test.go @@ -42,6 +42,7 @@ func TestMigrationsRunner_IsIdempotent_AndSchemaIsUpToDate(t *testing.T) { // usage_logs: billing_type used by filters/stats requireColumn(t, tx, "usage_logs", "billing_type", "smallint", 0, false) + requireColumn(t, tx, "usage_logs", "openai_ws_mode", "boolean", 0, false) // settings table should exist var settingsRegclass sql.NullString diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go index ce67ba4d8..ddad79363 100644 --- a/backend/internal/repository/usage_log_repo.go +++ b/backend/internal/repository/usage_log_repo.go @@ -22,7 +22,7 @@ import ( "github.com/lib/pq" ) -const usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, account_rate_multiplier, billing_type, stream, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, media_type, reasoning_effort, cache_ttl_overridden, created_at" +const usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, 
cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, account_rate_multiplier, billing_type, stream, openai_ws_mode, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, media_type, reasoning_effort, cache_ttl_overridden, created_at" // dateFormatWhitelist 将 granularity 参数映射为 PostgreSQL TO_CHAR 格式字符串,防止外部输入直接拼入 SQL var dateFormatWhitelist = map[string]string{ @@ -124,6 +124,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) account_rate_multiplier, billing_type, stream, + openai_ws_mode, duration_ms, first_token_ms, user_agent, @@ -140,7 +141,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, - $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33 + $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34 ) ON CONFLICT (request_id, api_key_id) DO NOTHING RETURNING id, created_at @@ -185,6 +186,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) log.AccountRateMultiplier, log.BillingType, log.Stream, + log.OpenAIWSMode, duration, firstToken, userAgent, @@ -2268,6 +2270,7 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e accountRateMultiplier sql.NullFloat64 billingType int16 stream bool + openaiWSMode bool durationMs sql.NullInt64 firstTokenMs sql.NullInt64 userAgent sql.NullString @@ -2305,6 +2308,7 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e &accountRateMultiplier, &billingType, &stream, + &openaiWSMode, &durationMs, &firstTokenMs, &userAgent, @@ -2341,6 +2345,7 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e AccountRateMultiplier: nullFloat64Ptr(accountRateMultiplier), BillingType: int8(billingType), Stream: stream, + OpenAIWSMode: openaiWSMode, ImageCount: imageCount, CacheTTLOverridden: cacheTTLOverridden, 
CreatedAt: createdAt, diff --git a/backend/internal/repository/usage_log_repo_integration_test.go b/backend/internal/repository/usage_log_repo_integration_test.go index 8cb3aab11..1947fb6e1 100644 --- a/backend/internal/repository/usage_log_repo_integration_test.go +++ b/backend/internal/repository/usage_log_repo_integration_test.go @@ -130,6 +130,32 @@ func (s *UsageLogRepoSuite) TestGetByID_ReturnsAccountRateMultiplier() { s.Require().InEpsilon(0.5, *got.AccountRateMultiplier, 0.0001) } +func (s *UsageLogRepoSuite) TestGetByID_ReturnsOpenAIWSMode() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "getbyid-ws@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-getbyid-ws", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-getbyid-ws"}) + + log := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + RequestID: uuid.New().String(), + Model: "gpt-5.3-codex", + InputTokens: 10, + OutputTokens: 20, + TotalCost: 1.0, + ActualCost: 1.0, + OpenAIWSMode: true, + CreatedAt: timezone.Today().Add(3 * time.Hour), + } + _, err := s.repo.Create(s.ctx, log) + s.Require().NoError(err) + + got, err := s.repo.GetByID(s.ctx, log.ID) + s.Require().NoError(err) + s.Require().True(got.OpenAIWSMode) +} + // --- Delete --- func (s *UsageLogRepoSuite) TestDelete() { diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 54f00ab7a..2bc7d9641 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -209,6 +209,7 @@ type OpenAIForwardResult struct { // Stored for usage records display; nil means not provided / not applicable. 
ReasoningEffort *string Stream bool + OpenAIWSMode bool Duration time.Duration FirstTokenMs *int } @@ -1784,6 +1785,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco Model: originalModel, ReasoningEffort: reasoningEffort, Stream: reqStream, + OpenAIWSMode: false, Duration: time.Since(startTime), FirstTokenMs: firstTokenMs, }, nil @@ -1937,6 +1939,7 @@ func (s *OpenAIGatewayService) forwardOpenAIPassthrough( Model: reqModel, ReasoningEffort: reasoningEffort, Stream: reqStream, + OpenAIWSMode: false, Duration: time.Since(startTime), FirstTokenMs: firstTokenMs, }, nil @@ -3199,6 +3202,7 @@ func (s *OpenAIGatewayService) RecordUsage(ctx context.Context, input *OpenAIRec AccountRateMultiplier: &accountRateMultiplier, BillingType: billingType, Stream: result.Stream, + OpenAIWSMode: result.OpenAIWSMode, DurationMs: &durationMs, FirstTokenMs: result.FirstTokenMs, CreatedAt: time.Now(), diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index afbe9c8a7..063489fe6 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -3,6 +3,8 @@ package service import ( "bytes" "context" + "crypto/sha256" + "encoding/hex" "encoding/json" "errors" "fmt" @@ -393,6 +395,15 @@ func openAIWSPayloadString(payload map[string]any, key string) string { } } +func openAIWSSessionHashFromID(sessionID string) string { + normalized := strings.TrimSpace(sessionID) + if normalized == "" { + return "" + } + sum := sha256.Sum256([]byte(normalized)) + return hex.EncodeToString(sum[:]) +} + func extractOpenAIWSImageURL(value any) string { switch v := value.(type) { case string: @@ -1016,6 +1027,15 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( previousResponseID := openAIWSPayloadString(payload, "previous_response_id") promptCacheKey := openAIWSPayloadString(payload, "prompt_cache_key") _, hasTools := payload["tools"] + debugEnabled := 
isOpenAIWSModeDebugEnabled() + payloadBytes := -1 + resolvePayloadBytes := func() int { + if payloadBytes >= 0 { + return payloadBytes + } + payloadBytes = len(payloadAsJSONBytes(payload)) + return payloadBytes + } streamValue := "-" if raw, ok := payload["stream"]; ok { streamValue = normalizeOpenAIWSLogValue(strings.TrimSpace(fmt.Sprintf("%v", raw))) @@ -1027,7 +1047,6 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( turnMetadata = strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)) } setOpenAIWSTurnMetadata(payload, turnMetadata) - payloadJSON := payloadAsJSONBytes(payload) payloadEventType := openAIWSPayloadString(payload, "type") if payloadEventType == "" { payloadEventType = "response.create" @@ -1039,7 +1058,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( attempt, payloadEventType, normalizeOpenAIWSLogValue(strings.Join(sortedKeys(payload), ",")), - len(payloadJSON), + resolvePayloadBytes(), normalizeOpenAIWSLogValue(summarizeOpenAIWSPayloadKeySizes(payload, openAIWSPayloadKeySizeTopN)), normalizeOpenAIWSLogValue(summarizeOpenAIWSInput(payload["input"])), streamValue, @@ -1053,7 +1072,10 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( stateStore := s.getOpenAIWSStateStore() groupID := getOpenAIGroupIDFromContext(c) - sessionHash := s.GenerateSessionHash(c, payloadJSON) + sessionHash := s.GenerateSessionHash(c, nil) + if sessionHash == "" { + sessionHash = openAIWSSessionHashFromID(promptCacheKey) + } if turnState == "" && stateStore != nil && sessionHash != "" { if savedTurnState, ok := stateStore.GetSessionTurnState(groupID, sessionHash); ok { turnState = savedTurnState @@ -1201,18 +1223,20 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( account.ID, connID, truncateOpenAIWSLogValue(err.Error(), openAIWSLogValueMaxLen), - len(payloadJSON), + resolvePayloadBytes(), ) return nil, wrapOpenAIWSFallback("write_request", err) } - logOpenAIWSModeDebug( - "write_request_sent account_id=%d conn_id=%s stream=%v payload_bytes=%d 
previous_response_id=%s", - account.ID, - connID, - reqStream, - len(payloadJSON), - truncateOpenAIWSLogValue(previousResponseID, openAIWSIDValueMaxLen), - ) + if debugEnabled { + logOpenAIWSModeDebug( + "write_request_sent account_id=%d conn_id=%s stream=%v payload_bytes=%d previous_response_id=%s", + account.ID, + connID, + reqStream, + resolvePayloadBytes(), + truncateOpenAIWSLogValue(previousResponseID, openAIWSIDValueMaxLen), + ) + } usage := &OpenAIUsage{} var firstTokenMs *int @@ -1247,7 +1271,6 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( } clientDisconnected := false - debugEnabled := isOpenAIWSModeDebugEnabled() flushBatchSize := s.openAIWSEventFlushBatchSize() flushInterval := s.openAIWSEventFlushInterval() pendingFlushEvents := 0 @@ -1590,52 +1613,62 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } debugEnabled := isOpenAIWSModeDebugEnabled() - parseClientPayload := func(raw []byte) (map[string]any, string, string, string, string, error) { + type openAIWSClientPayload struct { + payload map[string]any + promptCacheKey string + previousResponseID string + originalModel string + payloadBytes int + trimmedRaw []byte + } + + parseClientPayload := func(raw []byte) (openAIWSClientPayload, error) { trimmed := bytes.TrimSpace(raw) if len(trimmed) == 0 { - return nil, "", "", "", "", NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "empty websocket request payload", nil) - } - if !gjson.ValidBytes(trimmed) { - return nil, "", "", "", "", NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", nil) - } - - payload := make(map[string]any) - if err := json.Unmarshal(trimmed, &payload); err != nil { - return nil, "", "", "", "", NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", err) + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "empty websocket request payload", nil) } - eventType := 
openAIWSPayloadString(payload, "type") + values := gjson.GetManyBytes(trimmed, "type", "model", "prompt_cache_key", "previous_response_id") + eventType := strings.TrimSpace(values[0].String()) if eventType == "" { - payload["type"] = "response.create" eventType = "response.create" } if eventType != "response.create" { if eventType == "response.append" { - return nil, "", "", "", "", NewOpenAIWSClientCloseError( + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError( coderws.StatusPolicyViolation, "response.append is not supported in ws v2; use response.create with previous_response_id", nil, ) } - return nil, "", "", "", "", NewOpenAIWSClientCloseError( + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError( coderws.StatusPolicyViolation, fmt.Sprintf("unsupported websocket request type: %s", eventType), nil, ) } - if turnMetadata := strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)); turnMetadata != "" { - setOpenAIWSTurnMetadata(payload, turnMetadata) - } - - originalModel := strings.TrimSpace(openAIWSPayloadString(payload, "model")) + originalModel := strings.TrimSpace(values[1].String()) if originalModel == "" { - return nil, "", "", "", "", NewOpenAIWSClientCloseError( + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError( coderws.StatusPolicyViolation, "model is required in response.create payload", nil, ) } + promptCacheKey := strings.TrimSpace(values[2].String()) + previousResponseID := strings.TrimSpace(values[3].String()) + + payload := make(map[string]any) + if err := json.Unmarshal(trimmed, &payload); err != nil { + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", err) + } + if _, exists := payload["type"]; !exists { + payload["type"] = "response.create" + } + if turnMetadata := strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)); turnMetadata != "" { + setOpenAIWSTurnMetadata(payload, turnMetadata) + } mappedModel := originalModel 
mappedModel = account.GetMappedModel(originalModel) if normalizedModel := normalizeCodexModel(mappedModel); normalizedModel != "" { @@ -1645,12 +1678,17 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( payload["model"] = mappedModel } - promptCacheKey := strings.TrimSpace(openAIWSPayloadString(payload, "prompt_cache_key")) - previousResponseID := strings.TrimSpace(openAIWSPayloadString(payload, "previous_response_id")) - return payload, eventType, promptCacheKey, previousResponseID, originalModel, nil + return openAIWSClientPayload{ + payload: payload, + promptCacheKey: promptCacheKey, + previousResponseID: previousResponseID, + originalModel: originalModel, + payloadBytes: len(trimmed), + trimmedRaw: trimmed, + }, nil } - firstPayload, _, firstPromptCacheKey, firstPreviousResponseID, firstOriginalModel, err := parseClientPayload(firstClientMessage) + firstPayload, err := parseClientPayload(firstClientMessage) if err != nil { return err } @@ -1658,8 +1696,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( turnState := strings.TrimSpace(c.GetHeader(openAIWSTurnStateHeader)) stateStore := s.getOpenAIWSStateStore() groupID := getOpenAIGroupIDFromContext(c) - firstPayloadJSON := payloadAsJSONBytes(firstPayload) - sessionHash := s.GenerateSessionHash(c, firstPayloadJSON) + sessionHash := s.GenerateSessionHash(c, firstPayload.trimmedRaw) if turnState == "" && stateStore != nil && sessionHash != "" { if savedTurnState, ok := stateStore.GetSessionTurnState(groupID, sessionHash); ok { turnState = savedTurnState @@ -1667,21 +1704,21 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } preferredConnID := "" - if stateStore != nil && firstPreviousResponseID != "" { - if connID, ok := stateStore.GetResponseConn(firstPreviousResponseID); ok { + if stateStore != nil && firstPayload.previousResponseID != "" { + if connID, ok := stateStore.GetResponseConn(firstPayload.previousResponseID); ok { preferredConnID = connID } } - 
storeDisabled := s.isOpenAIWSStoreDisabledInRequest(firstPayload, account) - if stateStore != nil && storeDisabled && firstPreviousResponseID == "" && sessionHash != "" { + storeDisabled := s.isOpenAIWSStoreDisabledInRequest(firstPayload.payload, account) + if stateStore != nil && storeDisabled && firstPayload.previousResponseID == "" && sessionHash != "" { if connID, ok := stateStore.GetSessionConn(groupID, sessionHash); ok { preferredConnID = connID } } isCodexCLI := openai.IsCodexCLIRequest(c.GetHeader("User-Agent")) || (s.cfg != nil && s.cfg.Gateway.ForceCodexCLI) - wsHeaders, _ := s.buildOpenAIWSHeaders(c, account, token, wsDecision, isCodexCLI, turnState, strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)), firstPromptCacheKey) + wsHeaders, _ := s.buildOpenAIWSHeaders(c, account, token, wsDecision, isCodexCLI, turnState, strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)), firstPayload.promptCacheKey) baseAcquireReq := openAIWSAcquireRequest{ Account: account, WSURL: wsURL, @@ -1708,7 +1745,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( wsPath, storeDisabled, sessionHash != "", - firstPreviousResponseID != "", + firstPayload.previousResponseID != "", ) if debugEnabled { @@ -1720,7 +1757,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( wsHost, truncateOpenAIWSLogValue(preferredConnID, openAIWSIDValueMaxLen), sessionHash != "", - firstPreviousResponseID != "", + firstPayload.previousResponseID != "", storeDisabled, ) } @@ -1793,7 +1830,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return payload, nil } - sendAndRelay := func(turn int, lease *openAIWSConnLease, payload map[string]any, originalModel string) (*OpenAIForwardResult, error) { + sendAndRelay := func(turn int, lease *openAIWSConnLease, payload map[string]any, payloadBytes int, originalModel string) (*OpenAIForwardResult, error) { if lease == nil { return nil, errors.New("upstream websocket lease is nil") } @@ -1806,7 
+1843,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( account.ID, turn, truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), - len(payloadAsJSONBytes(payload)), + payloadBytes, ) responseID := "" @@ -1905,8 +1942,9 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } } - currentPayload := firstPayload - currentOriginalModel := firstOriginalModel + currentPayload := firstPayload.payload + currentOriginalModel := firstPayload.originalModel + currentPayloadBytes := firstPayload.payloadBytes var sessionLease *openAIWSConnLease sessionConnID := "" releaseSessionLease := func() { @@ -1941,7 +1979,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } connID := sessionConnID - result, relayErr := sendAndRelay(turn, sessionLease, currentPayload, currentOriginalModel) + result, relayErr := sendAndRelay(turn, sessionLease, currentPayload, currentPayloadBytes, currentOriginalModel) if hooks != nil && hooks.AfterTurn != nil { hooks.AfterTurn(turn, result, relayErr) } @@ -1982,18 +2020,18 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return fmt.Errorf("read client websocket request: %w", readErr) } - nextPayload, _, nextPromptCacheKey, nextPreviousResponseID, nextOriginalModel, parseErr := parseClientPayload(nextClientMessage) + nextPayload, parseErr := parseClientPayload(nextClientMessage) if parseErr != nil { return parseErr } - if nextPromptCacheKey != "" { + if nextPayload.promptCacheKey != "" { // ingress 会话在整个客户端 WS 生命周期内复用同一上游连接; // prompt_cache_key 对握手头的更新仅在未来需要重新建连时生效。 - updatedHeaders, _ := s.buildOpenAIWSHeaders(c, account, token, wsDecision, isCodexCLI, turnState, strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)), nextPromptCacheKey) + updatedHeaders, _ := s.buildOpenAIWSHeaders(c, account, token, wsDecision, isCodexCLI, turnState, strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)), nextPayload.promptCacheKey) baseAcquireReq.Headers = updatedHeaders } - if 
stateStore != nil && nextPreviousResponseID != "" { - if stickyConnID, ok := stateStore.GetResponseConn(nextPreviousResponseID); ok { + if stateStore != nil && nextPayload.previousResponseID != "" { + if stickyConnID, ok := stateStore.GetResponseConn(nextPayload.previousResponseID); ok { if sessionConnID != "" && stickyConnID != "" && stickyConnID != sessionConnID { logOpenAIWSModeInfo( "ingress_ws_keep_session_conn account_id=%d turn=%d conn_id=%s sticky_conn_id=%s previous_response_id=%s", @@ -2001,15 +2039,16 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( turn, truncateOpenAIWSLogValue(sessionConnID, openAIWSIDValueMaxLen), truncateOpenAIWSLogValue(stickyConnID, openAIWSIDValueMaxLen), - truncateOpenAIWSLogValue(nextPreviousResponseID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(nextPayload.previousResponseID, openAIWSIDValueMaxLen), ) } else { preferredConnID = stickyConnID } } } - currentPayload = nextPayload - currentOriginalModel = nextOriginalModel + currentPayload = nextPayload.payload + currentOriginalModel = nextPayload.originalModel + currentPayloadBytes = nextPayload.payloadBytes storeDisabled = s.isOpenAIWSStoreDisabledInRequest(currentPayload, account) turn++ } diff --git a/backend/internal/service/usage_log.go b/backend/internal/service/usage_log.go index f98241835..73748a6d8 100644 --- a/backend/internal/service/usage_log.go +++ b/backend/internal/service/usage_log.go @@ -41,6 +41,7 @@ type UsageLog struct { BillingType int8 Stream bool + OpenAIWSMode bool DurationMs *int FirstTokenMs *int UserAgent *string diff --git a/backend/migrations/060_add_usage_log_openai_ws_mode.sql b/backend/migrations/060_add_usage_log_openai_ws_mode.sql new file mode 100644 index 000000000..b7d224142 --- /dev/null +++ b/backend/migrations/060_add_usage_log_openai_ws_mode.sql @@ -0,0 +1,2 @@ +-- Add openai_ws_mode flag to usage_logs to persist exact OpenAI WS transport type. 
+ALTER TABLE usage_logs ADD COLUMN IF NOT EXISTS openai_ws_mode BOOLEAN NOT NULL DEFAULT FALSE; diff --git a/frontend/src/components/admin/usage/UsageTable.vue b/frontend/src/components/admin/usage/UsageTable.vue index fc54b6676..4ef5e1a79 100644 --- a/frontend/src/components/admin/usage/UsageTable.vue +++ b/frontend/src/components/admin/usage/UsageTable.vue @@ -35,8 +35,8 @@ @@ -306,6 +306,18 @@ const cols = computed(() => [ { key: 'ip_address', label: t('admin.usage.ipAddress'), sortable: false } ]) +const getRequestTypeLabel = (row: AdminUsageLog): string => { + if (row.openai_ws_mode) return t('usage.ws') + return row.stream ? t('usage.stream') : t('usage.sync') +} + +const getRequestTypeBadgeClass = (row: AdminUsageLog): string => { + if (row.openai_ws_mode) return 'bg-violet-100 text-violet-800 dark:bg-violet-900 dark:text-violet-200' + return row.stream + ? 'bg-blue-100 text-blue-800 dark:bg-blue-900 dark:text-blue-200' + : 'bg-gray-100 text-gray-800 dark:bg-gray-700 dark:text-gray-200' +} + const formatCacheTokens = (tokens: number): string => { if (tokens >= 1000000) return `${(tokens / 1000000).toFixed(1)}M` if (tokens >= 1000) return `${(tokens / 1000).toFixed(1)}K` diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index 8620ac7d4..e5a0233e6 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -613,6 +613,7 @@ export default { firstToken: 'First Token', duration: 'Duration', time: 'Time', + ws: 'WS', stream: 'Stream', sync: 'Sync', in: 'In', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 94b350424..951944e44 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -619,6 +619,7 @@ export default { firstToken: '首 Token', duration: '耗时', time: '时间', + ws: 'WS', stream: '流式', sync: '同步', in: '输入', diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index a54cfceff..7db66f0c9 100644 --- 
a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -889,6 +889,7 @@ export interface UsageLog { billing_type: number stream: boolean + openai_ws_mode?: boolean duration_ms: number first_token_ms: number | null diff --git a/frontend/src/views/admin/UsageView.vue b/frontend/src/views/admin/UsageView.vue index dbc81f3a5..9c4544ecd 100644 --- a/frontend/src/views/admin/UsageView.vue +++ b/frontend/src/views/admin/UsageView.vue @@ -89,6 +89,10 @@ const handlePageChange = (p: number) => { pagination.page = p; loadLogs() } const handlePageSizeChange = (s: number) => { pagination.page_size = s; pagination.page = 1; loadLogs() } const cancelExport = () => exportAbortController?.abort() const openCleanupDialog = () => { cleanupDialogVisible.value = true } +const getRequestTypeLabel = (log: AdminUsageLog): string => { + if (log.openai_ws_mode) return t('usage.ws') + return log.stream ? t('usage.stream') : t('usage.sync') +} const exportToExcel = async () => { if (exporting.value) return; exporting.value = true; exportProgress.show = true @@ -114,7 +118,7 @@ const exportToExcel = async () => { if (c.signal.aborted) break; if (p === 1) { total = res.total; exportProgress.total = total } const rows = (res.items || []).map((log: AdminUsageLog) => [ log.created_at, log.user?.email || '', log.api_key?.name || '', log.account?.name || '', log.model, - formatReasoningEffort(log.reasoning_effort), log.group?.name || '', log.stream ? 
t('usage.stream') : t('usage.sync'), + formatReasoningEffort(log.reasoning_effort), log.group?.name || '', getRequestTypeLabel(log), log.input_tokens, log.output_tokens, log.cache_read_tokens, log.cache_creation_tokens, log.input_cost?.toFixed(6) || '0.000000', log.output_cost?.toFixed(6) || '0.000000', log.cache_read_cost?.toFixed(6) || '0.000000', log.cache_creation_cost?.toFixed(6) || '0.000000', diff --git a/frontend/src/views/user/UsageView.vue b/frontend/src/views/user/UsageView.vue index 53a11702c..f4046918c 100644 --- a/frontend/src/views/user/UsageView.vue +++ b/frontend/src/views/user/UsageView.vue @@ -166,13 +166,9 @@ @@ -577,6 +573,18 @@ const formatUserAgent = (ua: string): string => { return ua } +const getRequestTypeLabel = (log: UsageLog): string => { + if (log.openai_ws_mode) return t('usage.ws') + return log.stream ? t('usage.stream') : t('usage.sync') +} + +const getRequestTypeBadgeClass = (log: UsageLog): string => { + if (log.openai_ws_mode) return 'bg-violet-100 text-violet-800 dark:bg-violet-900 dark:text-violet-200' + return log.stream + ? 'bg-blue-100 text-blue-800 dark:bg-blue-900 dark:text-blue-200' + : 'bg-gray-100 text-gray-800 dark:bg-gray-700 dark:text-gray-200' +} + const formatTokens = (value: number): string => { if (value >= 1_000_000_000) { return `${(value / 1_000_000_000).toFixed(2)}B` @@ -768,7 +776,7 @@ const exportToCSV = async () => { log.api_key?.name || '', log.model, formatReasoningEffort(log.reasoning_effort), - log.stream ? 'Stream' : 'Sync', + log.openai_ws_mode ? 'WS' : log.stream ? 
'Stream' : 'Sync', log.input_tokens, log.output_tokens, log.cache_read_tokens, From 0805eac6b42e56fa8c9c54110f0c9f8e73d1e4b7 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 09:31:30 +0800 Subject: [PATCH 014/120] =?UTF-8?q?fix(config):=20=E5=B0=86=20max=5Fconns?= =?UTF-8?q?=5Fper=5Faccount=20=E9=BB=98=E8=AE=A4=E5=80=BC=E8=B0=83?= =?UTF-8?q?=E6=95=B4=E4=B8=BA=20128?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/config/config.go | 2 +- deploy/config.example.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index 02a57d92b..c5be017a3 100644 --- a/backend/internal/config/config.go +++ b/backend/internal/config/config.go @@ -1259,7 +1259,7 @@ func setDefaults() { viper.SetDefault("gateway.openai_ws.prewarm_generate_enabled", false) viper.SetDefault("gateway.openai_ws.responses_websockets", false) viper.SetDefault("gateway.openai_ws.responses_websockets_v2", true) - viper.SetDefault("gateway.openai_ws.max_conns_per_account", 24) + viper.SetDefault("gateway.openai_ws.max_conns_per_account", 128) viper.SetDefault("gateway.openai_ws.min_idle_per_account", 4) viper.SetDefault("gateway.openai_ws.max_idle_per_account", 12) viper.SetDefault("gateway.openai_ws.dynamic_max_conns_by_account_concurrency_enabled", true) diff --git a/deploy/config.example.yaml b/deploy/config.example.yaml index a35ab27d0..022a19d7b 100644 --- a/deploy/config.example.yaml +++ b/deploy/config.example.yaml @@ -222,7 +222,7 @@ gateway: responses_websockets: false responses_websockets_v2: true # 连接池参数(按账号池化复用) - max_conns_per_account: 24 + max_conns_per_account: 128 min_idle_per_account: 4 max_idle_per_account: 12 # 是否按账号并发动态计算连接池上限: From ee3d3795c2c1cee5437001d1f251de1c14b62da3 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 09:44:31 +0800 Subject: [PATCH 015/120] 
=?UTF-8?q?perf(ws):=20=E4=BC=98=E5=8C=96=20OpenAI?= =?UTF-8?q?=20WS=20v2=20=E5=8D=8F=E8=AE=AE=20JSON=20=E7=83=AD=E8=B7=AF?= =?UTF-8?q?=E5=BE=84?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/handler/gateway_handler.go | 60 +-- .../handler/openai_gateway_handler.go | 16 +- .../service/openai_gateway_service.go | 51 +-- .../internal/service/openai_tool_corrector.go | 348 +++++++++--------- 4 files changed, 207 insertions(+), 268 deletions(-) diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 38c4e234c..459fb60f6 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "net/http" + "strconv" "strings" "time" @@ -956,20 +957,8 @@ func (h *GatewayHandler) handleStreamingAwareError(c *gin.Context, status int, e // Stream already started, send error as SSE event then close flusher, ok := c.Writer.(http.Flusher) if ok { - // Send error event in SSE format with proper JSON marshaling - errorData := map[string]any{ - "type": "error", - "error": map[string]string{ - "type": errType, - "message": message, - }, - } - jsonBytes, err := json.Marshal(errorData) - if err != nil { - _ = c.Error(err) - return - } - errorEvent := fmt.Sprintf("data: %s\n\n", string(jsonBytes)) + // SSE 错误事件固定 schema,使用 Quote 直拼可避免额外 Marshal 分配。 + errorEvent := `data: {"type":"error","error":{"type":` + strconv.Quote(errType) + `,"message":` + strconv.Quote(message) + `}}` + "\n\n" if _, err := fmt.Fprint(c.Writer, errorEvent); err != nil { _ = c.Error(err) } @@ -1217,24 +1206,8 @@ func sendMockInterceptStream(c *gin.Context, model string, interceptType Interce textDeltas = []string{"New", " Conversation"} } - // Build message_start event with proper JSON marshaling - messageStart := map[string]any{ - "type": "message_start", - "message": map[string]any{ - "id": msgID, - "type": "message", - 
"role": "assistant", - "model": model, - "content": []any{}, - "stop_reason": nil, - "stop_sequence": nil, - "usage": map[string]int{ - "input_tokens": 10, - "output_tokens": 0, - }, - }, - } - messageStartJSON, _ := json.Marshal(messageStart) + // Build message_start event with fixed schema. + messageStartJSON := `{"type":"message_start","message":{"id":` + strconv.Quote(msgID) + `,"type":"message","role":"assistant","model":` + strconv.Quote(model) + `,"content":[],"stop_reason":null,"stop_sequence":null,"usage":{"input_tokens":10,"output_tokens":0}}}` // Build events events := []string{ @@ -1244,31 +1217,12 @@ func sendMockInterceptStream(c *gin.Context, model string, interceptType Interce // Add text deltas for _, text := range textDeltas { - delta := map[string]any{ - "type": "content_block_delta", - "index": 0, - "delta": map[string]string{ - "type": "text_delta", - "text": text, - }, - } - deltaJSON, _ := json.Marshal(delta) + deltaJSON := `{"type":"content_block_delta","index":0,"delta":{"type":"text_delta","text":` + strconv.Quote(text) + `}}` events = append(events, `event: content_block_delta`+"\n"+`data: `+string(deltaJSON)) } // Add final events - messageDelta := map[string]any{ - "type": "message_delta", - "delta": map[string]any{ - "stop_reason": "end_turn", - "stop_sequence": nil, - }, - "usage": map[string]int{ - "input_tokens": 10, - "output_tokens": outputTokens, - }, - } - messageDeltaJSON, _ := json.Marshal(messageDelta) + messageDeltaJSON := `{"type":"message_delta","delta":{"stop_reason":"end_turn","stop_sequence":null},"usage":{"input_tokens":10,"output_tokens":` + strconv.Itoa(outputTokens) + `}}` events = append(events, `event: content_block_stop`+"\n"+`data: {"index":0,"type":"content_block_stop"}`, diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index b039d9f6f..dbfbadeb7 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ 
b/backend/internal/handler/openai_gateway_handler.go @@ -8,6 +8,7 @@ import ( "io" "net/http" "runtime/debug" + "strconv" "strings" "time" @@ -868,19 +869,8 @@ func (h *OpenAIGatewayHandler) handleStreamingAwareError(c *gin.Context, status // Stream already started, send error as SSE event then close flusher, ok := c.Writer.(http.Flusher) if ok { - // Send error event in OpenAI SSE format with proper JSON marshaling - errorData := map[string]any{ - "error": map[string]string{ - "type": errType, - "message": message, - }, - } - jsonBytes, err := json.Marshal(errorData) - if err != nil { - _ = c.Error(err) - return - } - errorEvent := fmt.Sprintf("event: error\ndata: %s\n\n", string(jsonBytes)) + // SSE 错误事件固定 schema,使用 Quote 直拼可避免额外 Marshal 分配。 + errorEvent := "event: error\ndata: " + `{"error":{"type":` + strconv.Quote(errType) + `,"message":` + strconv.Quote(message) + `}}` + "\n\n" if _, err := fmt.Fprint(c.Writer, errorEvent); err != nil { _ = c.Error(err) } diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 2bc7d9641..60020d77a 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -2205,6 +2205,7 @@ func (s *OpenAIGatewayService) handleStreamingResponsePassthrough( for scanner.Scan() { line := scanner.Text() if data, ok := extractOpenAISSEDataLine(line); ok { + dataBytes := []byte(data) trimmedData := strings.TrimSpace(data) if trimmedData == "[DONE]" { sawDone = true @@ -2213,7 +2214,7 @@ func (s *OpenAIGatewayService) handleStreamingResponsePassthrough( ms := int(time.Since(startTime).Milliseconds()) firstTokenMs = &ms } - s.parseSSEUsage(data, usage) + s.parseSSEUsageBytes(dataBytes, usage) } if !clientDisconnected { @@ -2706,19 +2707,9 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp return } errorEventSent = true - payload := map[string]any{ - "type": "error", - "sequence_number": 
0, - "error": map[string]any{ - "type": "upstream_error", - "message": reason, - "code": reason, - }, - } - if b, err := json.Marshal(payload); err == nil { - _, _ = fmt.Fprintf(w, "data: %s\n\n", b) - flusher.Flush() - } + payload := `{"type":"error","sequence_number":0,"error":{"type":"upstream_error","message":` + strconv.Quote(reason) + `,"code":` + strconv.Quote(reason) + `}}` + _, _ = fmt.Fprintf(w, "data: %s\n\n", payload) + flusher.Flush() } needModelReplace := originalModel != mappedModel @@ -2761,10 +2752,13 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp line = s.replaceModelInSSELine(line, mappedModel, originalModel) } + dataBytes := []byte(data) + // Correct Codex tool calls if needed (apply_patch -> edit, etc.) - if correctedData, corrected := s.toolCorrector.CorrectToolCallsInSSEData(data); corrected { - data = correctedData - line = "data: " + correctedData + if correctedData, corrected := s.toolCorrector.CorrectToolCallsInSSEBytes(dataBytes); corrected { + dataBytes = correctedData + data = string(correctedData) + line = "data: " + data } // 写入客户端(客户端断开后继续 drain 上游) @@ -2782,7 +2776,7 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp ms := int(time.Since(startTime).Milliseconds()) firstTokenMs = &ms } - s.parseSSEUsage(data, usage) + s.parseSSEUsageBytes(dataBytes, usage) } else { // Forward non-data lines as-is if !clientDisconnected { @@ -2882,10 +2876,9 @@ func (s *OpenAIGatewayService) correctToolCallsInResponseBody(body []byte) []byt return body } - bodyStr := string(body) - corrected, changed := s.toolCorrector.CorrectToolCallsInSSEData(bodyStr) + corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(body) if changed { - return []byte(corrected) + return corrected } return body } @@ -3036,16 +3029,10 @@ func extractCodexFinalResponse(body string) ([]byte, bool) { if data == "" || data == "[DONE]" { continue } - var event struct { - Type string `json:"type"` - 
Response json.RawMessage `json:"response"` - } - if json.Unmarshal([]byte(data), &event) != nil { - continue - } - if event.Type == "response.done" || event.Type == "response.completed" { - if len(event.Response) > 0 { - return event.Response, true + eventType := gjson.Get(data, "type").String() + if eventType == "response.done" || eventType == "response.completed" { + if response := gjson.Get(data, "response"); response.Exists() && response.Type == gjson.JSON && response.Raw != "" { + return []byte(response.Raw), true } } } @@ -3063,7 +3050,7 @@ func (s *OpenAIGatewayService) parseSSEUsageFromBody(body string) *OpenAIUsage { if data == "" || data == "[DONE]" { continue } - s.parseSSEUsage(data, usage) + s.parseSSEUsageBytes([]byte(data), usage) } return usage } diff --git a/backend/internal/service/openai_tool_corrector.go b/backend/internal/service/openai_tool_corrector.go index aa811e481..348723a65 100644 --- a/backend/internal/service/openai_tool_corrector.go +++ b/backend/internal/service/openai_tool_corrector.go @@ -2,11 +2,14 @@ package service import ( "bytes" - "encoding/json" "fmt" + "strconv" + "strings" "sync" "github.com/Wei-Shaw/sub2api/internal/pkg/logger" + "github.com/tidwall/gjson" + "github.com/tidwall/sjson" ) // codexToolNameMapping 定义 Codex 原生工具名称到 OpenCode 工具名称的映射 @@ -79,184 +82,185 @@ func (c *CodexToolCorrector) CorrectToolCallsInSSEBytes(data []byte) ([]byte, bo if !mayContainToolCallPayload(data) { return data, false } - - // 尝试解析 JSON - var payload map[string]any - if err := json.Unmarshal(data, &payload); err != nil { - // 不是有效的 JSON,直接返回原数据 + if !gjson.ValidBytes(data) { + // 不是有效 JSON,直接返回原数据 return data, false } - corrected := c.correctToolCallsInPayload(payload) + updated := data + corrected := false + collect := func(changed bool, next []byte) { + if changed { + corrected = true + updated = next + } + } - if !corrected { - return data, false + if next, changed := c.correctToolCallsArrayAtPath(updated, "tool_calls"); changed { + 
collect(changed, next) + } + if next, changed := c.correctFunctionAtPath(updated, "function_call"); changed { + collect(changed, next) + } + if next, changed := c.correctToolCallsArrayAtPath(updated, "delta.tool_calls"); changed { + collect(changed, next) + } + if next, changed := c.correctFunctionAtPath(updated, "delta.function_call"); changed { + collect(changed, next) } - // 序列化回 JSON - correctedBytes, err := json.Marshal(payload) - if err != nil { - logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Failed to marshal corrected data: %v", err) - return data, false + choicesCount := int(gjson.GetBytes(updated, "choices.#").Int()) + for i := 0; i < choicesCount; i++ { + prefix := "choices." + strconv.Itoa(i) + if next, changed := c.correctToolCallsArrayAtPath(updated, prefix+".message.tool_calls"); changed { + collect(changed, next) + } + if next, changed := c.correctFunctionAtPath(updated, prefix+".message.function_call"); changed { + collect(changed, next) + } + if next, changed := c.correctToolCallsArrayAtPath(updated, prefix+".delta.tool_calls"); changed { + collect(changed, next) + } + if next, changed := c.correctFunctionAtPath(updated, prefix+".delta.function_call"); changed { + collect(changed, next) + } } - return correctedBytes, true + if !corrected { + return data, false + } + return updated, true } func mayContainToolCallPayload(data []byte) bool { - // 快速路径:多数 token / 文本事件不包含工具字段,避免进入 Unmarshal 热路径。 + // 快速路径:多数 token / 文本事件不包含工具字段,避免进入 JSON 解析热路径。 return bytes.Contains(data, []byte(`"tool_calls"`)) || bytes.Contains(data, []byte(`"function_call"`)) || bytes.Contains(data, []byte(`"function":{"name"`)) } -func (c *CodexToolCorrector) correctToolCallsInPayload(payload map[string]any) bool { - if len(payload) == 0 { - return false +// correctToolCallsArrayAtPath 修正指定路径下 tool_calls 数组中的工具名称。 +func (c *CodexToolCorrector) correctToolCallsArrayAtPath(data []byte, toolCallsPath string) ([]byte, bool) { + count := 
int(gjson.GetBytes(data, toolCallsPath+".#").Int()) + if count <= 0 { + return data, false } + updated := data corrected := false - - // 处理 tool_calls 数组 - if toolCalls, ok := payload["tool_calls"].([]any); ok { - if c.correctToolCallsArray(toolCalls) { + for i := 0; i < count; i++ { + functionPath := toolCallsPath + "." + strconv.Itoa(i) + ".function" + if next, changed := c.correctFunctionAtPath(updated, functionPath); changed { + updated = next corrected = true } } - - // 处理 function_call 对象 - if functionCall, ok := payload["function_call"].(map[string]any); ok { - if c.correctFunctionCall(functionCall) { - corrected = true - } - } - - // 处理 delta.tool_calls - if delta, ok := payload["delta"].(map[string]any); ok { - if toolCalls, ok := delta["tool_calls"].([]any); ok { - if c.correctToolCallsArray(toolCalls) { - corrected = true - } - } - if functionCall, ok := delta["function_call"].(map[string]any); ok { - if c.correctFunctionCall(functionCall) { - corrected = true - } - } - } - - // 处理 choices[].message.tool_calls 和 choices[].delta.tool_calls - if choices, ok := payload["choices"].([]any); ok { - for _, choice := range choices { - if choiceMap, ok := choice.(map[string]any); ok { - // 处理 message 中的工具调用 - if message, ok := choiceMap["message"].(map[string]any); ok { - if toolCalls, ok := message["tool_calls"].([]any); ok { - if c.correctToolCallsArray(toolCalls) { - corrected = true - } - } - if functionCall, ok := message["function_call"].(map[string]any); ok { - if c.correctFunctionCall(functionCall) { - corrected = true - } - } - } - // 处理 delta 中的工具调用 - if delta, ok := choiceMap["delta"].(map[string]any); ok { - if toolCalls, ok := delta["tool_calls"].([]any); ok { - if c.correctToolCallsArray(toolCalls) { - corrected = true - } - } - if functionCall, ok := delta["function_call"].(map[string]any); ok { - if c.correctFunctionCall(functionCall) { - corrected = true - } - } - } - } - } - } - return corrected + return updated, corrected } -// 
correctToolCallsArray 修正工具调用数组中的工具名称 -func (c *CodexToolCorrector) correctToolCallsArray(toolCalls []any) bool { - corrected := false - for _, toolCall := range toolCalls { - if toolCallMap, ok := toolCall.(map[string]any); ok { - if function, ok := toolCallMap["function"].(map[string]any); ok { - if c.correctFunctionCall(function) { - corrected = true - } - } - } +// correctFunctionAtPath 修正指定路径下单个函数调用的工具名称和参数。 +func (c *CodexToolCorrector) correctFunctionAtPath(data []byte, functionPath string) ([]byte, bool) { + namePath := functionPath + ".name" + nameResult := gjson.GetBytes(data, namePath) + if !nameResult.Exists() || nameResult.Type != gjson.String { + return data, false } - return corrected -} - -// correctFunctionCall 修正单个函数调用的工具名称和参数 -func (c *CodexToolCorrector) correctFunctionCall(functionCall map[string]any) bool { - name, ok := functionCall["name"].(string) - if !ok || name == "" { - return false + name := strings.TrimSpace(nameResult.Str) + if name == "" { + return data, false } - + updated := data corrected := false // 查找并修正工具名称 if correctName, found := codexToolNameMapping[name]; found { - functionCall["name"] = correctName - c.recordCorrection(name, correctName) - corrected = true - name = correctName // 使用修正后的名称进行参数修正 + if next, err := sjson.SetBytes(updated, namePath, correctName); err == nil { + updated = next + c.recordCorrection(name, correctName) + corrected = true + name = correctName // 使用修正后的名称进行参数修正 + } } // 修正工具参数(基于工具名称) - if c.correctToolParameters(name, functionCall) { + if next, changed := c.correctToolParametersAtPath(updated, functionPath+".arguments", name); changed { + updated = next corrected = true } - - return corrected + return updated, corrected } -// correctToolParameters 修正工具参数以符合 OpenCode 规范 -func (c *CodexToolCorrector) correctToolParameters(toolName string, functionCall map[string]any) bool { - arguments, ok := functionCall["arguments"] - if !ok { - return false +// correctToolParametersAtPath 修正指定路径下 arguments 参数。 
+func (c *CodexToolCorrector) correctToolParametersAtPath(data []byte, argumentsPath, toolName string) ([]byte, bool) { + if toolName != "bash" && toolName != "edit" { + return data, false } - // arguments 可能是字符串(JSON)或已解析的 map - var argsMap map[string]any - switch v := arguments.(type) { - case string: - // 解析 JSON 字符串 - if err := json.Unmarshal([]byte(v), &argsMap); err != nil { - return false + args := gjson.GetBytes(data, argumentsPath) + if !args.Exists() { + return data, false + } + + switch args.Type { + case gjson.String: + argsJSON := strings.TrimSpace(args.Str) + if !gjson.Valid(argsJSON) { + return data, false + } + if !gjson.Parse(argsJSON).IsObject() { + return data, false } - case map[string]any: - argsMap = v + nextArgsJSON, corrected := c.correctToolArgumentsJSON(argsJSON, toolName) + if !corrected { + return data, false + } + next, err := sjson.SetBytes(data, argumentsPath, nextArgsJSON) + if err != nil { + return data, false + } + return next, true + case gjson.JSON: + if !args.IsObject() || !gjson.Valid(args.Raw) { + return data, false + } + nextArgsJSON, corrected := c.correctToolArgumentsJSON(args.Raw, toolName) + if !corrected { + return data, false + } + next, err := sjson.SetRawBytes(data, argumentsPath, []byte(nextArgsJSON)) + if err != nil { + return data, false + } + return next, true default: - return false + return data, false } +} +// correctToolArgumentsJSON 修正工具参数 JSON(对象字符串),返回修正后的 JSON 与是否变更。 +func (c *CodexToolCorrector) correctToolArgumentsJSON(argsJSON, toolName string) (string, bool) { + if !gjson.Valid(argsJSON) { + return argsJSON, false + } + if !gjson.Parse(argsJSON).IsObject() { + return argsJSON, false + } + + updated := argsJSON corrected := false // 根据工具名称应用特定的参数修正规则 switch toolName { case "bash": // OpenCode bash 支持 workdir;有些来源会输出 work_dir。 - if _, hasWorkdir := argsMap["workdir"]; !hasWorkdir { - if workDir, exists := argsMap["work_dir"]; exists { - argsMap["workdir"] = workDir - delete(argsMap, "work_dir") + if 
!gjson.Get(updated, "workdir").Exists() { + if next, changed := moveJSONField(updated, "work_dir", "workdir"); changed { + updated = next corrected = true logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Renamed 'work_dir' to 'workdir' in bash tool") } } else { - if _, exists := argsMap["work_dir"]; exists { - delete(argsMap, "work_dir") + if next, changed := deleteJSONField(updated, "work_dir"); changed { + updated = next corrected = true logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Removed duplicate 'work_dir' parameter from bash tool") } @@ -264,67 +268,71 @@ func (c *CodexToolCorrector) correctToolParameters(toolName string, functionCall case "edit": // OpenCode edit 参数为 filePath/oldString/newString(camelCase)。 - if _, exists := argsMap["filePath"]; !exists { - if filePath, exists := argsMap["file_path"]; exists { - argsMap["filePath"] = filePath - delete(argsMap, "file_path") + if !gjson.Get(updated, "filePath").Exists() { + if next, changed := moveJSONField(updated, "file_path", "filePath"); changed { + updated = next corrected = true logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Renamed 'file_path' to 'filePath' in edit tool") - } else if filePath, exists := argsMap["path"]; exists { - argsMap["filePath"] = filePath - delete(argsMap, "path") + } else if next, changed := moveJSONField(updated, "path", "filePath"); changed { + updated = next corrected = true logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Renamed 'path' to 'filePath' in edit tool") - } else if filePath, exists := argsMap["file"]; exists { - argsMap["filePath"] = filePath - delete(argsMap, "file") + } else if next, changed := moveJSONField(updated, "file", "filePath"); changed { + updated = next corrected = true logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Renamed 'file' to 'filePath' in edit tool") } } - if _, exists := argsMap["oldString"]; !exists { - 
if oldString, exists := argsMap["old_string"]; exists { - argsMap["oldString"] = oldString - delete(argsMap, "old_string") - corrected = true - logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Renamed 'old_string' to 'oldString' in edit tool") - } + if next, changed := moveJSONField(updated, "old_string", "oldString"); changed { + updated = next + corrected = true + logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Renamed 'old_string' to 'oldString' in edit tool") } - if _, exists := argsMap["newString"]; !exists { - if newString, exists := argsMap["new_string"]; exists { - argsMap["newString"] = newString - delete(argsMap, "new_string") - corrected = true - logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Renamed 'new_string' to 'newString' in edit tool") - } + if next, changed := moveJSONField(updated, "new_string", "newString"); changed { + updated = next + corrected = true + logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Renamed 'new_string' to 'newString' in edit tool") } - if _, exists := argsMap["replaceAll"]; !exists { - if replaceAll, exists := argsMap["replace_all"]; exists { - argsMap["replaceAll"] = replaceAll - delete(argsMap, "replace_all") - corrected = true - logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Renamed 'replace_all' to 'replaceAll' in edit tool") - } + if next, changed := moveJSONField(updated, "replace_all", "replaceAll"); changed { + updated = next + corrected = true + logger.LegacyPrintf("service.openai_tool_corrector", "[CodexToolCorrector] Renamed 'replace_all' to 'replaceAll' in edit tool") } } + return updated, corrected +} - // 如果修正了参数,需要重新序列化 - if corrected { - if _, wasString := arguments.(string); wasString { - // 原本是字符串,序列化回字符串 - if newArgsJSON, err := json.Marshal(argsMap); err == nil { - functionCall["arguments"] = string(newArgsJSON) - } - } else { - // 原本是 map,直接赋值 - 
functionCall["arguments"] = argsMap - } +func moveJSONField(input, from, to string) (string, bool) { + if gjson.Get(input, to).Exists() { + return input, false + } + src := gjson.Get(input, from) + if !src.Exists() { + return input, false + } + next, err := sjson.SetRaw(input, to, src.Raw) + if err != nil { + return input, false + } + next, err = sjson.Delete(next, from) + if err != nil { + return input, false } + return next, true +} - return corrected +func deleteJSONField(input, path string) (string, bool) { + if !gjson.Get(input, path).Exists() { + return input, false + } + next, err := sjson.Delete(input, path) + if err != nil { + return input, false + } + return next, true } // recordCorrection 记录一次工具名称修正 From e5d6325cd88f2f173e098be2abdbd70be5633e88 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 10:31:30 +0800 Subject: [PATCH 016/120] =?UTF-8?q?chore(version):=20=E6=9B=B4=E6=96=B0?= =?UTF-8?q?=E7=89=88=E6=9C=AC=E5=8F=B7=E8=87=B3=200.1.85.3?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 0fbc54109..a6b531056 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.85.2 +0.1.85.3 From d182a5c9285bf2570be82eb9d3f8cd056f30ec0d Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 11:03:49 +0800 Subject: [PATCH 017/120] =?UTF-8?q?feat(openai):=20=E8=90=BD=E5=9C=B0JSON?= =?UTF-8?q?=E8=A7=A3=E6=9E=90=E4=BC=98=E5=8C=96=E5=B9=B6=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?WS=E7=BB=AD=E9=93=BE=E6=A0=A1=E9=AA=8C?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 合并 function_call_output 校验入口,减少重复遍历 input - WS ingress 改为单次 json.Unmarshal,并统一字段提取 - usage 提取统一为 gjson 路径,补充 benchmark 对比 - 修复 OpenAIWSMode 标记与相关测试断言 - 修复 lint 问题(errcheck/ineffassign) --- 
.../handler/openai_gateway_handler.go | 14 +- .../service/openai_gateway_service.go | 69 ++-- ...openai_json_optimization_benchmark_test.go | 357 ++++++++++++++++++ .../service/openai_tool_continuation.go | 229 +++++++---- .../internal/service/openai_ws_forwarder.go | 34 +- ...penai_ws_forwarder_ingress_session_test.go | 20 +- .../openai_ws_forwarder_success_test.go | 15 + 7 files changed, 596 insertions(+), 142 deletions(-) create mode 100644 backend/internal/service/openai_json_optimization_benchmark_test.go diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index dbfbadeb7..a0155de4d 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -150,18 +150,18 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { var reqBody map[string]any if err := json.Unmarshal(body, &reqBody); err == nil { c.Set(service.OpenAIParsedRequestBodyKey, reqBody) - if service.HasFunctionCallOutput(reqBody) { + validation := service.ValidateFunctionCallOutputContext(reqBody) + if validation.HasFunctionCallOutput { previousResponseID, _ := reqBody["previous_response_id"].(string) - if strings.TrimSpace(previousResponseID) == "" && !service.HasToolCallContext(reqBody) { - if service.HasFunctionCallOutputMissingCallID(reqBody) { + if strings.TrimSpace(previousResponseID) == "" && !validation.HasToolCallContext { + if validation.HasFunctionCallOutputMissingCallID { reqLog.Warn("openai.request_validation_failed", zap.String("reason", "function_call_output_missing_call_id"), ) h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires call_id or previous_response_id; if relying on history, ensure store=true and reuse previous_response_id") return } - callIDs := service.FunctionCallOutputCallIDs(reqBody) - if !service.HasItemReferenceForCallIDs(reqBody, callIDs) { + if !validation.HasItemReferenceForAllCallIDs { 
reqLog.Warn("openai.request_validation_failed", zap.String("reason", "function_call_output_missing_item_reference"), ) @@ -492,7 +492,9 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { reqLog.Warn("openai.websocket_accept_failed", zap.Error(err)) return } - defer wsConn.CloseNow() + defer func() { + _ = wsConn.CloseNow() + }() wsConn.SetReadLimit(128 * 1024 * 1024) ctx := c.Request.Context() diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 60020d77a..f9551a4b0 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -2287,19 +2287,8 @@ func (s *OpenAIGatewayService) handleNonStreamingResponsePassthrough( usage := &OpenAIUsage{} usageParsed := false if len(body) > 0 { - var response struct { - Usage struct { - InputTokens int `json:"input_tokens"` - OutputTokens int `json:"output_tokens"` - InputTokenDetails struct { - CachedTokens int `json:"cached_tokens"` - } `json:"input_tokens_details"` - } `json:"usage"` - } - if json.Unmarshal(body, &response) == nil { - usage.InputTokens = response.Usage.InputTokens - usage.OutputTokens = response.Usage.OutputTokens - usage.CacheReadInputTokens = response.Usage.InputTokenDetails.CachedTokens + if parsedUsage, ok := extractOpenAIUsageFromJSONBytes(body); ok { + *usage = parsedUsage usageParsed = true } } @@ -2904,6 +2893,23 @@ func (s *OpenAIGatewayService) parseSSEUsageBytes(data []byte, usage *OpenAIUsag usage.CacheReadInputTokens = int(gjson.GetBytes(data, "response.usage.input_tokens_details.cached_tokens").Int()) } +func extractOpenAIUsageFromJSONBytes(body []byte) (OpenAIUsage, bool) { + if len(body) == 0 || !gjson.ValidBytes(body) { + return OpenAIUsage{}, false + } + values := gjson.GetManyBytes( + body, + "usage.input_tokens", + "usage.output_tokens", + "usage.input_tokens_details.cached_tokens", + ) + return OpenAIUsage{ + InputTokens: 
int(values[0].Int()), + OutputTokens: int(values[1].Int()), + CacheReadInputTokens: int(values[2].Int()), + }, true +} + func (s *OpenAIGatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string) (*OpenAIUsage, error) { maxBytes := resolveUpstreamResponseReadLimit(s.cfg) body, err := readUpstreamResponseBodyLimited(resp.Body, maxBytes) @@ -2927,25 +2933,11 @@ func (s *OpenAIGatewayService) handleNonStreamingResponse(ctx context.Context, r } } - // Parse usage - var response struct { - Usage struct { - InputTokens int `json:"input_tokens"` - OutputTokens int `json:"output_tokens"` - InputTokenDetails struct { - CachedTokens int `json:"cached_tokens"` - } `json:"input_tokens_details"` - } `json:"usage"` - } - if err := json.Unmarshal(body, &response); err != nil { - return nil, fmt.Errorf("parse response: %w", err) - } - - usage := &OpenAIUsage{ - InputTokens: response.Usage.InputTokens, - OutputTokens: response.Usage.OutputTokens, - CacheReadInputTokens: response.Usage.InputTokenDetails.CachedTokens, + usageValue, usageOK := extractOpenAIUsageFromJSONBytes(body) + if !usageOK { + return nil, fmt.Errorf("parse response: invalid json response") } + usage := &usageValue // Replace model in response if needed if originalModel != mappedModel { @@ -2977,19 +2969,8 @@ func (s *OpenAIGatewayService) handleOAuthSSEToJSON(resp *http.Response, c *gin. 
usage := &OpenAIUsage{} if ok { - var response struct { - Usage struct { - InputTokens int `json:"input_tokens"` - OutputTokens int `json:"output_tokens"` - InputTokenDetails struct { - CachedTokens int `json:"cached_tokens"` - } `json:"input_tokens_details"` - } `json:"usage"` - } - if err := json.Unmarshal(finalResponse, &response); err == nil { - usage.InputTokens = response.Usage.InputTokens - usage.OutputTokens = response.Usage.OutputTokens - usage.CacheReadInputTokens = response.Usage.InputTokenDetails.CachedTokens + if parsedUsage, parsed := extractOpenAIUsageFromJSONBytes(finalResponse); parsed { + *usage = parsedUsage } body = finalResponse if originalModel != mappedModel { diff --git a/backend/internal/service/openai_json_optimization_benchmark_test.go b/backend/internal/service/openai_json_optimization_benchmark_test.go new file mode 100644 index 000000000..1737804b8 --- /dev/null +++ b/backend/internal/service/openai_json_optimization_benchmark_test.go @@ -0,0 +1,357 @@ +package service + +import ( + "encoding/json" + "strconv" + "strings" + "testing" + + "github.com/tidwall/gjson" +) + +var ( + benchmarkToolContinuationBoolSink bool + benchmarkWSParseStringSink string + benchmarkWSParseMapSink map[string]any + benchmarkUsageSink OpenAIUsage +) + +func BenchmarkToolContinuationValidationLegacy(b *testing.B) { + reqBody := benchmarkToolContinuationRequestBody() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchmarkToolContinuationBoolSink = legacyValidateFunctionCallOutputContext(reqBody) + } +} + +func BenchmarkToolContinuationValidationOptimized(b *testing.B) { + reqBody := benchmarkToolContinuationRequestBody() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + benchmarkToolContinuationBoolSink = optimizedValidateFunctionCallOutputContext(reqBody) + } +} + +func BenchmarkWSIngressPayloadParseLegacy(b *testing.B) { + raw := benchmarkWSIngressPayloadBytes() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < 
b.N; i++ { + eventType, model, promptCacheKey, previousResponseID, payload, err := legacyParseWSIngressPayload(raw) + if err == nil { + benchmarkWSParseStringSink = eventType + model + promptCacheKey + previousResponseID + benchmarkWSParseMapSink = payload + } + } +} + +func BenchmarkWSIngressPayloadParseOptimized(b *testing.B) { + raw := benchmarkWSIngressPayloadBytes() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + eventType, model, promptCacheKey, previousResponseID, payload, err := optimizedParseWSIngressPayload(raw) + if err == nil { + benchmarkWSParseStringSink = eventType + model + promptCacheKey + previousResponseID + benchmarkWSParseMapSink = payload + } + } +} + +func BenchmarkOpenAIUsageExtractLegacy(b *testing.B) { + body := benchmarkOpenAIUsageJSONBytes() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + usage, ok := legacyExtractOpenAIUsageFromJSONBytes(body) + if ok { + benchmarkUsageSink = usage + } + } +} + +func BenchmarkOpenAIUsageExtractOptimized(b *testing.B) { + body := benchmarkOpenAIUsageJSONBytes() + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + usage, ok := extractOpenAIUsageFromJSONBytes(body) + if ok { + benchmarkUsageSink = usage + } + } +} + +func benchmarkToolContinuationRequestBody() map[string]any { + input := make([]any, 0, 64) + for i := 0; i < 24; i++ { + input = append(input, map[string]any{ + "type": "text", + "text": "benchmark text", + }) + } + for i := 0; i < 10; i++ { + callID := "call_" + strconv.Itoa(i) + input = append(input, map[string]any{ + "type": "tool_call", + "call_id": callID, + }) + input = append(input, map[string]any{ + "type": "function_call_output", + "call_id": callID, + }) + input = append(input, map[string]any{ + "type": "item_reference", + "id": callID, + }) + } + return map[string]any{ + "model": "gpt-5.3-codex", + "input": input, + } +} + +func benchmarkWSIngressPayloadBytes() []byte { + return 
[]byte(`{"type":"response.create","model":"gpt-5.3-codex","prompt_cache_key":"cache_bench","previous_response_id":"resp_prev_bench","input":[{"type":"message","role":"user","content":[{"type":"input_text","text":"hello"}]}]}`) +} + +func benchmarkOpenAIUsageJSONBytes() []byte { + return []byte(`{"id":"resp_bench","object":"response","model":"gpt-5.3-codex","usage":{"input_tokens":3210,"output_tokens":987,"input_tokens_details":{"cached_tokens":456}}}`) +} + +func legacyValidateFunctionCallOutputContext(reqBody map[string]any) bool { + if !legacyHasFunctionCallOutput(reqBody) { + return true + } + previousResponseID, _ := reqBody["previous_response_id"].(string) + if strings.TrimSpace(previousResponseID) != "" { + return true + } + if legacyHasToolCallContext(reqBody) { + return true + } + if legacyHasFunctionCallOutputMissingCallID(reqBody) { + return false + } + callIDs := legacyFunctionCallOutputCallIDs(reqBody) + return legacyHasItemReferenceForCallIDs(reqBody, callIDs) +} + +func optimizedValidateFunctionCallOutputContext(reqBody map[string]any) bool { + validation := ValidateFunctionCallOutputContext(reqBody) + if !validation.HasFunctionCallOutput { + return true + } + previousResponseID, _ := reqBody["previous_response_id"].(string) + if strings.TrimSpace(previousResponseID) != "" { + return true + } + if validation.HasToolCallContext { + return true + } + if validation.HasFunctionCallOutputMissingCallID { + return false + } + return validation.HasItemReferenceForAllCallIDs +} + +func legacyHasFunctionCallOutput(reqBody map[string]any) bool { + if reqBody == nil { + return false + } + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType == "function_call_output" { + return true + } + } + return false +} + +func legacyHasToolCallContext(reqBody map[string]any) bool { + if reqBody == nil { + 
return false + } + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "tool_call" && itemType != "function_call" { + continue + } + if callID, ok := itemMap["call_id"].(string); ok && strings.TrimSpace(callID) != "" { + return true + } + } + return false +} + +func legacyFunctionCallOutputCallIDs(reqBody map[string]any) []string { + if reqBody == nil { + return nil + } + input, ok := reqBody["input"].([]any) + if !ok { + return nil + } + ids := make(map[string]struct{}) + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "function_call_output" { + continue + } + if callID, ok := itemMap["call_id"].(string); ok && strings.TrimSpace(callID) != "" { + ids[callID] = struct{}{} + } + } + if len(ids) == 0 { + return nil + } + callIDs := make([]string, 0, len(ids)) + for id := range ids { + callIDs = append(callIDs, id) + } + return callIDs +} + +func legacyHasFunctionCallOutputMissingCallID(reqBody map[string]any) bool { + if reqBody == nil { + return false + } + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + itemType, _ := itemMap["type"].(string) + if itemType != "function_call_output" { + continue + } + callID, _ := itemMap["call_id"].(string) + if strings.TrimSpace(callID) == "" { + return true + } + } + return false +} + +func legacyHasItemReferenceForCallIDs(reqBody map[string]any, callIDs []string) bool { + if reqBody == nil || len(callIDs) == 0 { + return false + } + input, ok := reqBody["input"].([]any) + if !ok { + return false + } + referenceIDs := make(map[string]struct{}) + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { + continue + } + 
itemType, _ := itemMap["type"].(string) + if itemType != "item_reference" { + continue + } + idValue, _ := itemMap["id"].(string) + idValue = strings.TrimSpace(idValue) + if idValue == "" { + continue + } + referenceIDs[idValue] = struct{}{} + } + if len(referenceIDs) == 0 { + return false + } + for _, callID := range callIDs { + if _, ok := referenceIDs[callID]; !ok { + return false + } + } + return true +} + +func legacyParseWSIngressPayload(raw []byte) (eventType, model, promptCacheKey, previousResponseID string, payload map[string]any, err error) { + values := gjson.GetManyBytes(raw, "type", "model", "prompt_cache_key", "previous_response_id") + eventType = strings.TrimSpace(values[0].String()) + if eventType == "" { + eventType = "response.create" + } + model = strings.TrimSpace(values[1].String()) + promptCacheKey = strings.TrimSpace(values[2].String()) + previousResponseID = strings.TrimSpace(values[3].String()) + payload = make(map[string]any) + if err = json.Unmarshal(raw, &payload); err != nil { + return "", "", "", "", nil, err + } + if _, exists := payload["type"]; !exists { + payload["type"] = "response.create" + } + return eventType, model, promptCacheKey, previousResponseID, payload, nil +} + +func optimizedParseWSIngressPayload(raw []byte) (eventType, model, promptCacheKey, previousResponseID string, payload map[string]any, err error) { + payload = make(map[string]any) + if err = json.Unmarshal(raw, &payload); err != nil { + return "", "", "", "", nil, err + } + eventType = openAIWSPayloadString(payload, "type") + if eventType == "" { + eventType = "response.create" + payload["type"] = eventType + } + model = openAIWSPayloadString(payload, "model") + promptCacheKey = openAIWSPayloadString(payload, "prompt_cache_key") + previousResponseID = openAIWSPayloadString(payload, "previous_response_id") + return eventType, model, promptCacheKey, previousResponseID, payload, nil +} + +func legacyExtractOpenAIUsageFromJSONBytes(body []byte) (OpenAIUsage, bool) 
{ + var response struct { + Usage struct { + InputTokens int `json:"input_tokens"` + OutputTokens int `json:"output_tokens"` + InputTokenDetails struct { + CachedTokens int `json:"cached_tokens"` + } `json:"input_tokens_details"` + } `json:"usage"` + } + if err := json.Unmarshal(body, &response); err != nil { + return OpenAIUsage{}, false + } + return OpenAIUsage{ + InputTokens: response.Usage.InputTokens, + OutputTokens: response.Usage.OutputTokens, + CacheReadInputTokens: response.Usage.InputTokenDetails.CachedTokens, + }, true +} diff --git a/backend/internal/service/openai_tool_continuation.go b/backend/internal/service/openai_tool_continuation.go index e59082b2b..dea3c172d 100644 --- a/backend/internal/service/openai_tool_continuation.go +++ b/backend/internal/service/openai_tool_continuation.go @@ -2,6 +2,24 @@ package service import "strings" +// ToolContinuationSignals 聚合工具续链相关信号,避免重复遍历 input。 +type ToolContinuationSignals struct { + HasFunctionCallOutput bool + HasFunctionCallOutputMissingCallID bool + HasToolCallContext bool + HasItemReference bool + HasItemReferenceForAllCallIDs bool + FunctionCallOutputCallIDs []string +} + +// FunctionCallOutputValidation 汇总 function_call_output 关联性校验结果。 +type FunctionCallOutputValidation struct { + HasFunctionCallOutput bool + HasToolCallContext bool + HasFunctionCallOutputMissingCallID bool + HasItemReferenceForAllCallIDs bool +} + // NeedsToolContinuation 判定请求是否需要工具调用续链处理。 // 满足以下任一信号即视为续链:previous_response_id、input 内包含 function_call_output/item_reference、 // 或显式声明 tools/tool_choice。 @@ -18,29 +36,6 @@ func NeedsToolContinuation(reqBody map[string]any) bool { if hasToolChoiceSignal(reqBody) { return true } - if inputHasType(reqBody, "function_call_output") { - return true - } - if inputHasType(reqBody, "item_reference") { - return true - } - return false -} - -// HasFunctionCallOutput 判断 input 是否包含 function_call_output,用于触发续链校验。 -func HasFunctionCallOutput(reqBody map[string]any) bool { - if reqBody == nil { - 
return false - } - return inputHasType(reqBody, "function_call_output") -} - -// HasToolCallContext 判断 input 是否包含带 call_id 的 tool_call/function_call, -// 用于判断 function_call_output 是否具备可关联的上下文。 -func HasToolCallContext(reqBody map[string]any) bool { - if reqBody == nil { - return false - } input, ok := reqBody["input"].([]any) if !ok { return false @@ -51,74 +46,181 @@ func HasToolCallContext(reqBody map[string]any) bool { continue } itemType, _ := itemMap["type"].(string) - if itemType != "tool_call" && itemType != "function_call" { - continue - } - if callID, ok := itemMap["call_id"].(string); ok && strings.TrimSpace(callID) != "" { + if itemType == "function_call_output" || itemType == "item_reference" { return true } } return false } -// FunctionCallOutputCallIDs 提取 input 中 function_call_output 的 call_id 集合。 -// 仅返回非空 call_id,用于与 item_reference.id 做匹配校验。 -func FunctionCallOutputCallIDs(reqBody map[string]any) []string { +// AnalyzeToolContinuationSignals 单次遍历 input,提取 function_call_output/tool_call/item_reference 相关信号。 +func AnalyzeToolContinuationSignals(reqBody map[string]any) ToolContinuationSignals { + signals := ToolContinuationSignals{} if reqBody == nil { - return nil + return signals } input, ok := reqBody["input"].([]any) if !ok { - return nil + return signals } - ids := make(map[string]struct{}) + + var callIDs map[string]struct{} + var referenceIDs map[string]struct{} + for _, item := range input { itemMap, ok := item.(map[string]any) if !ok { continue } itemType, _ := itemMap["type"].(string) - if itemType != "function_call_output" { - continue - } - if callID, ok := itemMap["call_id"].(string); ok && strings.TrimSpace(callID) != "" { - ids[callID] = struct{}{} + switch itemType { + case "tool_call", "function_call": + callID, _ := itemMap["call_id"].(string) + if strings.TrimSpace(callID) != "" { + signals.HasToolCallContext = true + } + case "function_call_output": + signals.HasFunctionCallOutput = true + callID, _ := itemMap["call_id"].(string) + 
callID = strings.TrimSpace(callID) + if callID == "" { + signals.HasFunctionCallOutputMissingCallID = true + continue + } + if callIDs == nil { + callIDs = make(map[string]struct{}) + } + callIDs[callID] = struct{}{} + case "item_reference": + signals.HasItemReference = true + idValue, _ := itemMap["id"].(string) + idValue = strings.TrimSpace(idValue) + if idValue == "" { + continue + } + if referenceIDs == nil { + referenceIDs = make(map[string]struct{}) + } + referenceIDs[idValue] = struct{}{} } } - if len(ids) == 0 { - return nil + + if len(callIDs) == 0 { + return signals } - result := make([]string, 0, len(ids)) - for id := range ids { - result = append(result, id) + signals.FunctionCallOutputCallIDs = make([]string, 0, len(callIDs)) + allReferenced := len(referenceIDs) > 0 + for callID := range callIDs { + signals.FunctionCallOutputCallIDs = append(signals.FunctionCallOutputCallIDs, callID) + if allReferenced { + if _, ok := referenceIDs[callID]; !ok { + allReferenced = false + } + } } - return result + signals.HasItemReferenceForAllCallIDs = allReferenced + return signals } -// HasFunctionCallOutputMissingCallID 判断是否存在缺少 call_id 的 function_call_output。 -func HasFunctionCallOutputMissingCallID(reqBody map[string]any) bool { +// ValidateFunctionCallOutputContext 为 handler 提供低开销校验结果: +// 1) 无 function_call_output 直接返回 +// 2) 若已存在 tool_call/function_call 上下文则提前返回 +// 3) 仅在无工具上下文时才构建 call_id / item_reference 集合 +func ValidateFunctionCallOutputContext(reqBody map[string]any) FunctionCallOutputValidation { + result := FunctionCallOutputValidation{} if reqBody == nil { - return false + return result } input, ok := reqBody["input"].([]any) if !ok { - return false + return result } + for _, item := range input { itemMap, ok := item.(map[string]any) if !ok { continue } itemType, _ := itemMap["type"].(string) - if itemType != "function_call_output" { + switch itemType { + case "function_call_output": + result.HasFunctionCallOutput = true + case "tool_call", 
"function_call": + callID, _ := itemMap["call_id"].(string) + if strings.TrimSpace(callID) != "" { + result.HasToolCallContext = true + } + } + if result.HasFunctionCallOutput && result.HasToolCallContext { + return result + } + } + + if !result.HasFunctionCallOutput || result.HasToolCallContext { + return result + } + + callIDs := make(map[string]struct{}) + referenceIDs := make(map[string]struct{}) + for _, item := range input { + itemMap, ok := item.(map[string]any) + if !ok { continue } - callID, _ := itemMap["call_id"].(string) - if strings.TrimSpace(callID) == "" { - return true + itemType, _ := itemMap["type"].(string) + switch itemType { + case "function_call_output": + callID, _ := itemMap["call_id"].(string) + callID = strings.TrimSpace(callID) + if callID == "" { + result.HasFunctionCallOutputMissingCallID = true + continue + } + callIDs[callID] = struct{}{} + case "item_reference": + idValue, _ := itemMap["id"].(string) + idValue = strings.TrimSpace(idValue) + if idValue == "" { + continue + } + referenceIDs[idValue] = struct{}{} } } - return false + + if len(callIDs) == 0 || len(referenceIDs) == 0 { + return result + } + allReferenced := true + for callID := range callIDs { + if _, ok := referenceIDs[callID]; !ok { + allReferenced = false + break + } + } + result.HasItemReferenceForAllCallIDs = allReferenced + return result +} + +// HasFunctionCallOutput 判断 input 是否包含 function_call_output,用于触发续链校验。 +func HasFunctionCallOutput(reqBody map[string]any) bool { + return AnalyzeToolContinuationSignals(reqBody).HasFunctionCallOutput +} + +// HasToolCallContext 判断 input 是否包含带 call_id 的 tool_call/function_call, +// 用于判断 function_call_output 是否具备可关联的上下文。 +func HasToolCallContext(reqBody map[string]any) bool { + return AnalyzeToolContinuationSignals(reqBody).HasToolCallContext +} + +// FunctionCallOutputCallIDs 提取 input 中 function_call_output 的 call_id 集合。 +// 仅返回非空 call_id,用于与 item_reference.id 做匹配校验。 +func FunctionCallOutputCallIDs(reqBody map[string]any) 
[]string { + return AnalyzeToolContinuationSignals(reqBody).FunctionCallOutputCallIDs +} + +// HasFunctionCallOutputMissingCallID 判断是否存在缺少 call_id 的 function_call_output。 +func HasFunctionCallOutputMissingCallID(reqBody map[string]any) bool { + return AnalyzeToolContinuationSignals(reqBody).HasFunctionCallOutputMissingCallID } // HasItemReferenceForCallIDs 判断 item_reference.id 是否覆盖所有 call_id。 @@ -152,32 +254,13 @@ func HasItemReferenceForCallIDs(reqBody map[string]any, callIDs []string) bool { return false } for _, callID := range callIDs { - if _, ok := referenceIDs[callID]; !ok { + if _, ok := referenceIDs[strings.TrimSpace(callID)]; !ok { return false } } return true } -// inputHasType 判断 input 中是否存在指定类型的 item。 -func inputHasType(reqBody map[string]any, want string) bool { - input, ok := reqBody["input"].([]any) - if !ok { - return false - } - for _, item := range input { - itemMap, ok := item.(map[string]any) - if !ok { - continue - } - itemType, _ := itemMap["type"].(string) - if itemType == want { - return true - } - } - return false -} - // hasNonEmptyString 判断字段是否为非空字符串。 func hasNonEmptyString(value any) bool { stringValue, ok := value.(string) diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index 063489fe6..68492da37 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -388,10 +388,14 @@ func openAIWSPayloadString(payload map[string]any, key string) string { return "" } switch v := raw.(type) { + case nil: + return "" case string: return strings.TrimSpace(v) + case []byte: + return strings.TrimSpace(string(v)) default: - return strings.TrimSpace(fmt.Sprintf("%v", raw)) + return "" } } @@ -1564,6 +1568,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( Model: originalModel, ReasoningEffort: extractOpenAIReasoningEffort(reqBody, originalModel), Stream: reqStream, + OpenAIWSMode: true, Duration: time.Since(startTime), 
FirstTokenMs: firstTokenMs, }, nil @@ -1628,10 +1633,15 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "empty websocket request payload", nil) } - values := gjson.GetManyBytes(trimmed, "type", "model", "prompt_cache_key", "previous_response_id") - eventType := strings.TrimSpace(values[0].String()) + payload := make(map[string]any) + if err := json.Unmarshal(trimmed, &payload); err != nil { + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", err) + } + + eventType := openAIWSPayloadString(payload, "type") if eventType == "" { eventType = "response.create" + payload["type"] = eventType } if eventType != "response.create" { if eventType == "response.append" { @@ -1648,7 +1658,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( ) } - originalModel := strings.TrimSpace(values[1].String()) + originalModel := openAIWSPayloadString(payload, "model") if originalModel == "" { return openAIWSClientPayload{}, NewOpenAIWSClientCloseError( coderws.StatusPolicyViolation, @@ -1656,21 +1666,12 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( nil, ) } - promptCacheKey := strings.TrimSpace(values[2].String()) - previousResponseID := strings.TrimSpace(values[3].String()) - - payload := make(map[string]any) - if err := json.Unmarshal(trimmed, &payload); err != nil { - return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", err) - } - if _, exists := payload["type"]; !exists { - payload["type"] = "response.create" - } + promptCacheKey := openAIWSPayloadString(payload, "prompt_cache_key") + previousResponseID := openAIWSPayloadString(payload, "previous_response_id") if turnMetadata := strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)); turnMetadata != "" { 
setOpenAIWSTurnMetadata(payload, turnMetadata) } - mappedModel := originalModel - mappedModel = account.GetMappedModel(originalModel) + mappedModel := account.GetMappedModel(originalModel) if normalizedModel := normalizeCodexModel(mappedModel); normalizedModel != "" { mappedModel = normalizedModel } @@ -1935,6 +1936,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( Model: originalModel, ReasoningEffort: extractOpenAIReasoningEffort(payload, originalModel), Stream: reqStream, + OpenAIWSMode: true, Duration: time.Since(turnStart), FirstTokenMs: firstTokenMs, }, nil diff --git a/backend/internal/service/openai_ws_forwarder_ingress_session_test.go b/backend/internal/service/openai_ws_forwarder_ingress_session_test.go index 99227b8f0..d1aa7093c 100644 --- a/backend/internal/service/openai_ws_forwarder_ingress_session_test.go +++ b/backend/internal/service/openai_ws_forwarder_ingress_session_test.go @@ -70,6 +70,14 @@ func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_KeepLeaseAcrossT } serverErrCh := make(chan error, 1) + turnWSModeCh := make(chan bool, 2) + hooks := &OpenAIWSIngressHooks{ + AfterTurn: func(_ int, result *OpenAIForwardResult, turnErr error) { + if turnErr == nil && result != nil { + turnWSModeCh <- result.OpenAIWSMode + } + }, + } wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { conn, err := coderws.Accept(w, r, &coderws.AcceptOptions{ CompressionMode: coderws.CompressionContextTakeover, @@ -78,7 +86,9 @@ func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_KeepLeaseAcrossT serverErrCh <- err return } - defer conn.CloseNow() + defer func() { + _ = conn.CloseNow() + }() rec := httptest.NewRecorder() ginCtx, _ := gin.CreateTestContext(rec) @@ -99,7 +109,7 @@ func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_KeepLeaseAcrossT return } - serverErrCh <- svc.ProxyResponsesWebSocketFromClient(r.Context(), ginCtx, conn, account, "sk-test", firstMessage, nil) 
+ serverErrCh <- svc.ProxyResponsesWebSocketFromClient(r.Context(), ginCtx, conn, account, "sk-test", firstMessage, hooks) })) defer wsServer.Close() @@ -107,7 +117,9 @@ func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_KeepLeaseAcrossT clientConn, _, err := coderws.Dial(dialCtx, "ws"+strings.TrimPrefix(wsServer.URL, "http"), nil) cancelDial() require.NoError(t, err) - defer clientConn.CloseNow() + defer func() { + _ = clientConn.CloseNow() + }() writeMessage := func(payload string) { writeCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second) @@ -132,6 +144,8 @@ func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_KeepLeaseAcrossT secondTurnEvent := readMessage() require.Equal(t, "response.completed", gjson.GetBytes(secondTurnEvent, "type").String()) require.Equal(t, "resp_ingress_turn_2", gjson.GetBytes(secondTurnEvent, "response.id").String()) + require.True(t, <-turnWSModeCh, "首轮 turn 应标记为 WS 模式") + require.True(t, <-turnWSModeCh, "第二轮 turn 应标记为 WS 模式") require.NoError(t, clientConn.Close(coderws.StatusNormalClosure, "done")) diff --git a/backend/internal/service/openai_ws_forwarder_success_test.go b/backend/internal/service/openai_ws_forwarder_success_test.go index 95ef85d9a..e41bfec61 100644 --- a/backend/internal/service/openai_ws_forwarder_success_test.go +++ b/backend/internal/service/openai_ws_forwarder_success_test.go @@ -148,6 +148,7 @@ func TestOpenAIGatewayService_Forward_WSv2_SuccessAndBindSticky(t *testing.T) { require.Equal(t, 7, result.Usage.OutputTokens) require.Equal(t, 3, result.Usage.CacheReadInputTokens) require.Equal(t, "resp_new_1", result.RequestID) + require.True(t, result.OpenAIWSMode) require.False(t, gjson.GetBytes(upstream.lastBody, "model").Exists(), "WSv2 成功时不应回落 HTTP 上游") received := <-receivedCh @@ -179,6 +180,20 @@ func requestToJSONString(payload map[string]any) string { return string(b) } +func TestOpenAIWSPayloadString_OnlyAcceptsStringValues(t *testing.T) { + payload := 
map[string]any{ + "type": nil, + "model": 123, + "prompt_cache_key": " cache-key ", + "previous_response_id": []byte(" resp_1 "), + } + + require.Equal(t, "", openAIWSPayloadString(payload, "type")) + require.Equal(t, "", openAIWSPayloadString(payload, "model")) + require.Equal(t, "cache-key", openAIWSPayloadString(payload, "prompt_cache_key")) + require.Equal(t, "resp_1", openAIWSPayloadString(payload, "previous_response_id")) +} + func TestOpenAIGatewayService_Forward_WSv2_PoolReuseNotOneToOne(t *testing.T) { gin.SetMode(gin.TestMode) From 31959cf71627d8ac0becbf49126579149f154e71 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 11:07:57 +0800 Subject: [PATCH 018/120] =?UTF-8?q?refactor(openai):=20=E9=99=8D=E4=BD=8ER?= =?UTF-8?q?esponses=E4=B8=8EWS=20ingress=E5=88=86=E6=94=AF=E5=B5=8C?= =?UTF-8?q?=E5=A5=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 抽取 function_call_output 预校验 helper,改为早返回 - 抽取用户/账号槽位获取 helper,收敛等待队列分支 - WS ingress 事件类型解析改为 switch,减少 if 嵌套 --- .../handler/openai_gateway_handler.go | 322 ++++++++++-------- .../internal/service/openai_ws_forwarder.go | 20 +- 2 files changed, 193 insertions(+), 149 deletions(-) diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index a0155de4d..2fee413c3 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -143,34 +143,8 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { setOpsRequestContext(c, reqModel, reqStream, body) // 提前校验 function_call_output 是否具备可关联上下文,避免上游 400。 - // 要求 previous_response_id,或 input 内存在带 call_id 的 tool_call/function_call, - // 或带 id 且与 call_id 匹配的 item_reference。 - // 此路径需要遍历 input 数组做 call_id 关联检查,保留 Unmarshal - if gjson.GetBytes(body, `input.#(type=="function_call_output")`).Exists() { - var reqBody map[string]any - if err := json.Unmarshal(body, &reqBody); err == nil { - 
c.Set(service.OpenAIParsedRequestBodyKey, reqBody) - validation := service.ValidateFunctionCallOutputContext(reqBody) - if validation.HasFunctionCallOutput { - previousResponseID, _ := reqBody["previous_response_id"].(string) - if strings.TrimSpace(previousResponseID) == "" && !validation.HasToolCallContext { - if validation.HasFunctionCallOutputMissingCallID { - reqLog.Warn("openai.request_validation_failed", - zap.String("reason", "function_call_output_missing_call_id"), - ) - h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires call_id or previous_response_id; if relying on history, ensure store=true and reuse previous_response_id") - return - } - if !validation.HasItemReferenceForAllCallIDs { - reqLog.Warn("openai.request_validation_failed", - zap.String("reason", "function_call_output_missing_item_reference"), - ) - h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires item_reference ids matching each call_id, or previous_response_id/tool_call context; if relying on history, ensure store=true and reuse previous_response_id") - return - } - } - } - } + if !h.validateFunctionCallOutputRequest(c, body, reqLog) { + return } // 绑定错误透传服务,允许 service 层在非 failover 错误场景复用规则。 @@ -184,51 +158,11 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { service.SetOpsLatencyMs(c, service.OpsAuthLatencyMsKey, time.Since(requestStart).Milliseconds()) routingStart := time.Now() - // 0. 
先尝试直接抢占用户槽位(快速路径) - userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(c.Request.Context(), subject.UserID, subject.Concurrency) - if err != nil { - reqLog.Warn("openai.user_slot_acquire_failed", zap.Error(err)) - h.handleConcurrencyError(c, err, "user", streamStarted) + userReleaseFunc, acquired := h.acquireResponsesUserSlot(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted, reqLog) + if !acquired { return } - - waitCounted := false - if !userAcquired { - // 仅在抢槽失败时才进入等待队列,减少常态请求 Redis 写入。 - maxWait := service.CalculateMaxWait(subject.Concurrency) - canWait, waitErr := h.concurrencyHelper.IncrementWaitCount(c.Request.Context(), subject.UserID, maxWait) - if waitErr != nil { - reqLog.Warn("openai.user_wait_counter_increment_failed", zap.Error(waitErr)) - // 按现有降级语义:等待计数异常时放行后续抢槽流程 - } else if !canWait { - reqLog.Info("openai.user_wait_queue_full", zap.Int("max_wait", maxWait)) - h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later") - return - } - if waitErr == nil && canWait { - waitCounted = true - } - defer func() { - if waitCounted { - h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID) - } - }() - - userReleaseFunc, err = h.concurrencyHelper.AcquireUserSlotWithWait(c, subject.UserID, subject.Concurrency, reqStream, &streamStarted) - if err != nil { - reqLog.Warn("openai.user_slot_acquire_failed_after_wait", zap.Error(err)) - h.handleConcurrencyError(c, err, "user", streamStarted) - return - } - } - - // 用户槽位已获取:退出等待队列计数。 - if waitCounted { - h.concurrencyHelper.DecrementWaitCount(c.Request.Context(), subject.UserID) - waitCounted = false - } // 确保请求取消时也会释放槽位,避免长连接被动中断造成泄漏 - userReleaseFunc = wrapReleaseOnDone(c.Request.Context(), userReleaseFunc) if userReleaseFunc != nil { defer userReleaseFunc() } @@ -296,76 +230,10 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { reqLog.Debug("openai.account_selected", 
zap.Int64("account_id", account.ID), zap.String("account_name", account.Name)) setOpsSelectedAccount(c, account.ID, account.Platform) - // 3. Acquire account concurrency slot - accountReleaseFunc := selection.ReleaseFunc - if !selection.Acquired { - if selection.WaitPlan == nil { - h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", streamStarted) - return - } - - // 先快速尝试一次账号槽位,命中则跳过等待计数写入。 - fastReleaseFunc, fastAcquired, err := h.concurrencyHelper.TryAcquireAccountSlot( - c.Request.Context(), - account.ID, - selection.WaitPlan.MaxConcurrency, - ) - if err != nil { - reqLog.Warn("openai.account_slot_quick_acquire_failed", zap.Int64("account_id", account.ID), zap.Error(err)) - h.handleConcurrencyError(c, err, "account", streamStarted) - return - } - if fastAcquired { - accountReleaseFunc = fastReleaseFunc - if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionHash, account.ID); err != nil { - reqLog.Warn("openai.bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err)) - } - } else { - accountWaitCounted := false - canWait, err := h.concurrencyHelper.IncrementAccountWaitCount(c.Request.Context(), account.ID, selection.WaitPlan.MaxWaiting) - if err != nil { - reqLog.Warn("openai.account_wait_counter_increment_failed", zap.Int64("account_id", account.ID), zap.Error(err)) - } else if !canWait { - reqLog.Info("openai.account_wait_queue_full", - zap.Int64("account_id", account.ID), - zap.Int("max_waiting", selection.WaitPlan.MaxWaiting), - ) - h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", streamStarted) - return - } - if err == nil && canWait { - accountWaitCounted = true - } - releaseWait := func() { - if accountWaitCounted { - h.concurrencyHelper.DecrementAccountWaitCount(c.Request.Context(), account.ID) - accountWaitCounted = false - } - } - - accountReleaseFunc, err = 
h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( - c, - account.ID, - selection.WaitPlan.MaxConcurrency, - selection.WaitPlan.Timeout, - reqStream, - &streamStarted, - ) - if err != nil { - reqLog.Warn("openai.account_slot_acquire_failed", zap.Int64("account_id", account.ID), zap.Error(err)) - releaseWait() - h.handleConcurrencyError(c, err, "account", streamStarted) - return - } - // Slot acquired: no longer waiting in queue. - releaseWait() - if err := h.gatewayService.BindStickySession(c.Request.Context(), apiKey.GroupID, sessionHash, account.ID); err != nil { - reqLog.Warn("openai.bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err)) - } - } + accountReleaseFunc, acquired := h.acquireResponsesAccountSlot(c, apiKey.GroupID, sessionHash, selection, reqStream, &streamStarted, reqLog) + if !acquired { + return } - // 账号槽位/等待计数需要在超时或断开时安全回收 - accountReleaseFunc = wrapReleaseOnDone(c.Request.Context(), accountReleaseFunc) // Forward request service.SetOpsLatencyMs(c, service.OpsRoutingLatencyMsKey, time.Since(routingStart).Milliseconds()) @@ -453,6 +321,182 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { } } +func (h *OpenAIGatewayHandler) validateFunctionCallOutputRequest(c *gin.Context, body []byte, reqLog *zap.Logger) bool { + if !gjson.GetBytes(body, `input.#(type=="function_call_output")`).Exists() { + return true + } + + var reqBody map[string]any + if err := json.Unmarshal(body, &reqBody); err != nil { + // 保持原有容错语义:解析失败时跳过预校验,沿用后续上游校验结果。 + return true + } + + c.Set(service.OpenAIParsedRequestBodyKey, reqBody) + validation := service.ValidateFunctionCallOutputContext(reqBody) + if !validation.HasFunctionCallOutput { + return true + } + + previousResponseID, _ := reqBody["previous_response_id"].(string) + if strings.TrimSpace(previousResponseID) != "" || validation.HasToolCallContext { + return true + } + + if validation.HasFunctionCallOutputMissingCallID { + reqLog.Warn("openai.request_validation_failed", + 
zap.String("reason", "function_call_output_missing_call_id"), + ) + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires call_id or previous_response_id; if relying on history, ensure store=true and reuse previous_response_id") + return false + } + if validation.HasItemReferenceForAllCallIDs { + return true + } + + reqLog.Warn("openai.request_validation_failed", + zap.String("reason", "function_call_output_missing_item_reference"), + ) + h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "function_call_output requires item_reference ids matching each call_id, or previous_response_id/tool_call context; if relying on history, ensure store=true and reuse previous_response_id") + return false +} + +func (h *OpenAIGatewayHandler) acquireResponsesUserSlot( + c *gin.Context, + userID int64, + userConcurrency int, + reqStream bool, + streamStarted *bool, + reqLog *zap.Logger, +) (func(), bool) { + ctx := c.Request.Context() + userReleaseFunc, userAcquired, err := h.concurrencyHelper.TryAcquireUserSlot(ctx, userID, userConcurrency) + if err != nil { + reqLog.Warn("openai.user_slot_acquire_failed", zap.Error(err)) + h.handleConcurrencyError(c, err, "user", *streamStarted) + return nil, false + } + if userAcquired { + return wrapReleaseOnDone(ctx, userReleaseFunc), true + } + + maxWait := service.CalculateMaxWait(userConcurrency) + canWait, waitErr := h.concurrencyHelper.IncrementWaitCount(ctx, userID, maxWait) + if waitErr != nil { + reqLog.Warn("openai.user_wait_counter_increment_failed", zap.Error(waitErr)) + // 按现有降级语义:等待计数异常时放行后续抢槽流程 + } else if !canWait { + reqLog.Info("openai.user_wait_queue_full", zap.Int("max_wait", maxWait)) + h.errorResponse(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later") + return nil, false + } + + waitCounted := waitErr == nil && canWait + defer func() { + if waitCounted { + h.concurrencyHelper.DecrementWaitCount(ctx, userID) + } + }() 
+ + userReleaseFunc, err = h.concurrencyHelper.AcquireUserSlotWithWait(c, userID, userConcurrency, reqStream, streamStarted) + if err != nil { + reqLog.Warn("openai.user_slot_acquire_failed_after_wait", zap.Error(err)) + h.handleConcurrencyError(c, err, "user", *streamStarted) + return nil, false + } + + // 槽位获取成功后,立刻退出等待计数。 + if waitCounted { + h.concurrencyHelper.DecrementWaitCount(ctx, userID) + waitCounted = false + } + return wrapReleaseOnDone(ctx, userReleaseFunc), true +} + +func (h *OpenAIGatewayHandler) acquireResponsesAccountSlot( + c *gin.Context, + groupID *int64, + sessionHash string, + selection *service.AccountSelectionResult, + reqStream bool, + streamStarted *bool, + reqLog *zap.Logger, +) (func(), bool) { + if selection == nil || selection.Account == nil { + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", *streamStarted) + return nil, false + } + + ctx := c.Request.Context() + account := selection.Account + if selection.Acquired { + return wrapReleaseOnDone(ctx, selection.ReleaseFunc), true + } + if selection.WaitPlan == nil { + h.handleStreamingAwareError(c, http.StatusServiceUnavailable, "api_error", "No available accounts", *streamStarted) + return nil, false + } + + fastReleaseFunc, fastAcquired, err := h.concurrencyHelper.TryAcquireAccountSlot( + ctx, + account.ID, + selection.WaitPlan.MaxConcurrency, + ) + if err != nil { + reqLog.Warn("openai.account_slot_quick_acquire_failed", zap.Int64("account_id", account.ID), zap.Error(err)) + h.handleConcurrencyError(c, err, "account", *streamStarted) + return nil, false + } + if fastAcquired { + if err := h.gatewayService.BindStickySession(ctx, groupID, sessionHash, account.ID); err != nil { + reqLog.Warn("openai.bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err)) + } + return wrapReleaseOnDone(ctx, fastReleaseFunc), true + } + + canWait, waitErr := h.concurrencyHelper.IncrementAccountWaitCount(ctx, account.ID, 
selection.WaitPlan.MaxWaiting) + if waitErr != nil { + reqLog.Warn("openai.account_wait_counter_increment_failed", zap.Int64("account_id", account.ID), zap.Error(waitErr)) + } else if !canWait { + reqLog.Info("openai.account_wait_queue_full", + zap.Int64("account_id", account.ID), + zap.Int("max_waiting", selection.WaitPlan.MaxWaiting), + ) + h.handleStreamingAwareError(c, http.StatusTooManyRequests, "rate_limit_error", "Too many pending requests, please retry later", *streamStarted) + return nil, false + } + + accountWaitCounted := waitErr == nil && canWait + releaseWait := func() { + if accountWaitCounted { + h.concurrencyHelper.DecrementAccountWaitCount(ctx, account.ID) + accountWaitCounted = false + } + } + defer releaseWait() + + accountReleaseFunc, err := h.concurrencyHelper.AcquireAccountSlotWithWaitTimeout( + c, + account.ID, + selection.WaitPlan.MaxConcurrency, + selection.WaitPlan.Timeout, + reqStream, + streamStarted, + ) + if err != nil { + reqLog.Warn("openai.account_slot_acquire_failed", zap.Int64("account_id", account.ID), zap.Error(err)) + h.handleConcurrencyError(c, err, "account", *streamStarted) + return nil, false + } + + // Slot acquired: no longer waiting in queue. 
+ releaseWait() + if err := h.gatewayService.BindStickySession(ctx, groupID, sessionHash, account.ID); err != nil { + reqLog.Warn("openai.bind_sticky_session_failed", zap.Int64("account_id", account.ID), zap.Error(err)) + } + return wrapReleaseOnDone(ctx, accountReleaseFunc), true +} + // ResponsesWebSocket handles OpenAI Responses API WebSocket ingress endpoint // GET /openai/v1/responses (Upgrade: websocket) func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index 68492da37..ca81dec1c 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -1639,18 +1639,18 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } eventType := openAIWSPayloadString(payload, "type") - if eventType == "" { + switch eventType { + case "": eventType = "response.create" payload["type"] = eventType - } - if eventType != "response.create" { - if eventType == "response.append" { - return openAIWSClientPayload{}, NewOpenAIWSClientCloseError( - coderws.StatusPolicyViolation, - "response.append is not supported in ws v2; use response.create with previous_response_id", - nil, - ) - } + case "response.create": + case "response.append": + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError( + coderws.StatusPolicyViolation, + "response.append is not supported in ws v2; use response.create with previous_response_id", + nil, + ) + default: return openAIWSClientPayload{}, NewOpenAIWSClientCloseError( coderws.StatusPolicyViolation, fmt.Sprintf("unsupported websocket request type: %s", eventType), From cfaf40ef61d73ffd18d889e49e3fca52dacb874b Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 12:31:10 +0800 Subject: [PATCH 019/120] =?UTF-8?q?feat(usage):=20=E5=BC=95=E5=85=A5=20req?= =?UTF-8?q?uest=5Ftype=20=E6=9E=9A=E4=B8=BE=E5=B9=B6=E5=AE=8C=E6=88=90?= 
=?UTF-8?q?=E5=89=8D=E5=90=8E=E7=AB=AF=E5=85=BC=E5=AE=B9=E5=8D=87=E7=BA=A7?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 后端新增 request_type 字段与查询过滤优先级,保留 stream/openai_ws_mode 兼容 - 前端新增 request_type 类型与解析工具,统一 WS/流式/同步展示与筛选 - 增加迁移脚本、接口适配与回归单测,修复 request_type 回退兼容缺陷 Co-Authored-By: Claude Opus 4.6 --- .../handler/admin/dashboard_handler.go | 37 ++- .../dashboard_handler_request_type_test.go | 132 ++++++++ .../admin/usage_cleanup_handler_test.go | 86 ++++++ .../internal/handler/admin/usage_handler.go | 54 +++- .../admin/usage_handler_request_type_test.go | 117 +++++++ backend/internal/handler/dto/mappers.go | 16 +- .../handler/dto/mappers_usage_test.go | 45 +++ backend/internal/handler/dto/types.go | 12 +- .../handler/sora_gateway_handler_test.go | 4 +- backend/internal/handler/usage_handler.go | 13 +- .../usage_handler_request_type_test.go | 80 +++++ .../pkg/usagestats/usage_log_types.go | 1 + .../migrations_schema_integration_test.go | 1 + .../internal/repository/usage_cleanup_repo.go | 6 +- .../repository/usage_cleanup_repo_test.go | 17 ++ backend/internal/repository/usage_log_repo.go | 44 ++- .../usage_log_repo_integration_test.go | 44 ++- .../usage_log_repo_request_type_test.go | 285 ++++++++++++++++++ backend/internal/server/api_contract_test.go | 4 +- .../internal/service/account_usage_service.go | 8 +- backend/internal/service/dashboard_service.go | 8 +- backend/internal/service/ratelimit_service.go | 4 +- backend/internal/service/usage_cleanup.go | 1 + .../internal/service/usage_cleanup_service.go | 13 + .../service/usage_cleanup_service_test.go | 47 +++ backend/internal/service/usage_log.go | 106 ++++++- backend/internal/service/usage_log_test.go | 112 +++++++ backend/migrations/001_init.sql | 2 + .../061_add_usage_log_request_type.sql | 29 ++ frontend/src/api/admin/dashboard.ts | 5 +- frontend/src/api/admin/usage.ts | 5 +- .../admin/usage/UsageCleanupDialog.vue | 9 +- .../components/admin/usage/UsageFilters.vue 
| 11 +- .../src/components/admin/usage/UsageTable.vue | 17 +- frontend/src/i18n/locales/en.ts | 1 + frontend/src/i18n/locales/zh.ts | 1 + frontend/src/types/index.ts | 4 + frontend/src/utils/usageRequestType.ts | 33 ++ frontend/src/views/admin/UsageView.vue | 37 ++- frontend/src/views/user/UsageView.vue | 39 ++- 40 files changed, 1399 insertions(+), 91 deletions(-) create mode 100644 backend/internal/handler/admin/dashboard_handler_request_type_test.go create mode 100644 backend/internal/handler/admin/usage_handler_request_type_test.go create mode 100644 backend/internal/handler/usage_handler_request_type_test.go create mode 100644 backend/internal/repository/usage_log_repo_request_type_test.go create mode 100644 backend/internal/service/usage_log_test.go create mode 100644 backend/migrations/061_add_usage_log_request_type.sql create mode 100644 frontend/src/utils/usageRequestType.ts diff --git a/backend/internal/handler/admin/dashboard_handler.go b/backend/internal/handler/admin/dashboard_handler.go index fab66c04d..7e3185926 100644 --- a/backend/internal/handler/admin/dashboard_handler.go +++ b/backend/internal/handler/admin/dashboard_handler.go @@ -3,6 +3,7 @@ package admin import ( "errors" "strconv" + "strings" "time" "github.com/Wei-Shaw/sub2api/internal/pkg/response" @@ -186,7 +187,7 @@ func (h *DashboardHandler) GetRealtimeMetrics(c *gin.Context) { // GetUsageTrend handles getting usage trend data // GET /api/v1/admin/dashboard/trend -// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, stream, billing_type +// Query params: start_date, end_date (YYYY-MM-DD), granularity (day/hour), user_id, api_key_id, model, account_id, group_id, request_type, stream, billing_type func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { startTime, endTime := parseTimeRange(c) granularity := c.DefaultQuery("granularity", "day") @@ -194,6 +195,7 @@ func (h *DashboardHandler) GetUsageTrend(c 
*gin.Context) { // Parse optional filter params var userID, apiKeyID, accountID, groupID int64 var model string + var requestType *int16 var stream *bool var billingType *int8 @@ -220,9 +222,20 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { if modelStr := c.Query("model"); modelStr != "" { model = modelStr } - if streamStr := c.Query("stream"); streamStr != "" { + if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" { + parsed, err := service.ParseUsageRequestType(requestTypeStr) + if err != nil { + response.BadRequest(c, err.Error()) + return + } + value := int16(parsed) + requestType = &value + } else if streamStr := c.Query("stream"); streamStr != "" { if streamVal, err := strconv.ParseBool(streamStr); err == nil { stream = &streamVal + } else { + response.BadRequest(c, "Invalid stream value, use true or false") + return } } if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" { @@ -235,7 +248,7 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { } } - trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream, billingType) + trend, err := h.dashboardService.GetUsageTrendWithFilters(c.Request.Context(), startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, requestType, stream, billingType) if err != nil { response.Error(c, 500, "Failed to get usage trend") return @@ -251,12 +264,13 @@ func (h *DashboardHandler) GetUsageTrend(c *gin.Context) { // GetModelStats handles getting model usage statistics // GET /api/v1/admin/dashboard/models -// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, stream, billing_type +// Query params: start_date, end_date (YYYY-MM-DD), user_id, api_key_id, account_id, group_id, request_type, stream, billing_type func (h *DashboardHandler) GetModelStats(c *gin.Context) { startTime, endTime := 
parseTimeRange(c) // Parse optional filter params var userID, apiKeyID, accountID, groupID int64 + var requestType *int16 var stream *bool var billingType *int8 @@ -280,9 +294,20 @@ func (h *DashboardHandler) GetModelStats(c *gin.Context) { groupID = id } } - if streamStr := c.Query("stream"); streamStr != "" { + if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" { + parsed, err := service.ParseUsageRequestType(requestTypeStr) + if err != nil { + response.BadRequest(c, err.Error()) + return + } + value := int16(parsed) + requestType = &value + } else if streamStr := c.Query("stream"); streamStr != "" { if streamVal, err := strconv.ParseBool(streamStr); err == nil { stream = &streamVal + } else { + response.BadRequest(c, "Invalid stream value, use true or false") + return } } if billingTypeStr := c.Query("billing_type"); billingTypeStr != "" { @@ -295,7 +320,7 @@ func (h *DashboardHandler) GetModelStats(c *gin.Context) { } } - stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, stream, billingType) + stats, err := h.dashboardService.GetModelStatsWithFilters(c.Request.Context(), startTime, endTime, userID, apiKeyID, accountID, groupID, requestType, stream, billingType) if err != nil { response.Error(c, 500, "Failed to get model statistics") return diff --git a/backend/internal/handler/admin/dashboard_handler_request_type_test.go b/backend/internal/handler/admin/dashboard_handler_request_type_test.go new file mode 100644 index 000000000..72af6b45e --- /dev/null +++ b/backend/internal/handler/admin/dashboard_handler_request_type_test.go @@ -0,0 +1,132 @@ +package admin + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +type 
dashboardUsageRepoCapture struct { + service.UsageLogRepository + trendRequestType *int16 + trendStream *bool + modelRequestType *int16 + modelStream *bool +} + +func (s *dashboardUsageRepoCapture) GetUsageTrendWithFilters( + ctx context.Context, + startTime, endTime time.Time, + granularity string, + userID, apiKeyID, accountID, groupID int64, + model string, + requestType *int16, + stream *bool, + billingType *int8, +) ([]usagestats.TrendDataPoint, error) { + s.trendRequestType = requestType + s.trendStream = stream + return []usagestats.TrendDataPoint{}, nil +} + +func (s *dashboardUsageRepoCapture) GetModelStatsWithFilters( + ctx context.Context, + startTime, endTime time.Time, + userID, apiKeyID, accountID, groupID int64, + requestType *int16, + stream *bool, + billingType *int8, +) ([]usagestats.ModelStat, error) { + s.modelRequestType = requestType + s.modelStream = stream + return []usagestats.ModelStat{}, nil +} + +func newDashboardRequestTypeTestRouter(repo *dashboardUsageRepoCapture) *gin.Engine { + gin.SetMode(gin.TestMode) + dashboardSvc := service.NewDashboardService(repo, nil, nil, nil) + handler := NewDashboardHandler(dashboardSvc, nil) + router := gin.New() + router.GET("/admin/dashboard/trend", handler.GetUsageTrend) + router.GET("/admin/dashboard/models", handler.GetModelStats) + return router +} + +func TestDashboardTrendRequestTypePriority(t *testing.T) { + repo := &dashboardUsageRepoCapture{} + router := newDashboardRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/trend?request_type=ws_v2&stream=bad", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) + require.NotNil(t, repo.trendRequestType) + require.Equal(t, int16(service.RequestTypeWSV2), *repo.trendRequestType) + require.Nil(t, repo.trendStream) +} + +func TestDashboardTrendInvalidRequestType(t *testing.T) { + repo := &dashboardUsageRepoCapture{} + router := 
newDashboardRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/trend?request_type=bad", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} + +func TestDashboardTrendInvalidStream(t *testing.T) { + repo := &dashboardUsageRepoCapture{} + router := newDashboardRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/trend?stream=bad", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} + +func TestDashboardModelStatsRequestTypePriority(t *testing.T) { + repo := &dashboardUsageRepoCapture{} + router := newDashboardRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/models?request_type=sync&stream=bad", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) + require.NotNil(t, repo.modelRequestType) + require.Equal(t, int16(service.RequestTypeSync), *repo.modelRequestType) + require.Nil(t, repo.modelStream) +} + +func TestDashboardModelStatsInvalidRequestType(t *testing.T) { + repo := &dashboardUsageRepoCapture{} + router := newDashboardRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/models?request_type=bad", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} + +func TestDashboardModelStatsInvalidStream(t *testing.T) { + repo := &dashboardUsageRepoCapture{} + router := newDashboardRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/dashboard/models?stream=bad", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} diff --git a/backend/internal/handler/admin/usage_cleanup_handler_test.go 
b/backend/internal/handler/admin/usage_cleanup_handler_test.go index ed1c7cc22..6152d5e9d 100644 --- a/backend/internal/handler/admin/usage_cleanup_handler_test.go +++ b/backend/internal/handler/admin/usage_cleanup_handler_test.go @@ -225,6 +225,92 @@ func TestUsageHandlerCreateCleanupTaskInvalidEndDate(t *testing.T) { require.Equal(t, http.StatusBadRequest, recorder.Code) } +func TestUsageHandlerCreateCleanupTaskInvalidRequestType(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 88) + + payload := map[string]any{ + "start_date": "2024-01-01", + "end_date": "2024-01-02", + "timezone": "UTC", + "request_type": "invalid", + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusBadRequest, recorder.Code) +} + +func TestUsageHandlerCreateCleanupTaskRequestTypePriority(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 99) + + payload := map[string]any{ + "start_date": "2024-01-01", + "end_date": "2024-01-02", + "timezone": "UTC", + "request_type": "ws_v2", + "stream": false, + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + 
require.Equal(t, http.StatusOK, recorder.Code) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.created, 1) + created := repo.created[0] + require.NotNil(t, created.Filters.RequestType) + require.Equal(t, int16(service.RequestTypeWSV2), *created.Filters.RequestType) + require.Nil(t, created.Filters.Stream) +} + +func TestUsageHandlerCreateCleanupTaskWithLegacyStream(t *testing.T) { + repo := &cleanupRepoStub{} + cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} + cleanupService := service.NewUsageCleanupService(repo, nil, nil, cfg) + router := setupCleanupRouter(cleanupService, 99) + + payload := map[string]any{ + "start_date": "2024-01-01", + "end_date": "2024-01-02", + "timezone": "UTC", + "stream": true, + } + body, err := json.Marshal(payload) + require.NoError(t, err) + + req := httptest.NewRequest(http.MethodPost, "/api/v1/admin/usage/cleanup-tasks", bytes.NewReader(body)) + req.Header.Set("Content-Type", "application/json") + recorder := httptest.NewRecorder() + router.ServeHTTP(recorder, req) + + require.Equal(t, http.StatusOK, recorder.Code) + + repo.mu.Lock() + defer repo.mu.Unlock() + require.Len(t, repo.created, 1) + created := repo.created[0] + require.Nil(t, created.Filters.RequestType) + require.NotNil(t, created.Filters.Stream) + require.True(t, *created.Filters.Stream) +} + func TestUsageHandlerCreateCleanupTaskSuccess(t *testing.T) { repo := &cleanupRepoStub{} cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true, MaxRangeDays: 31}} diff --git a/backend/internal/handler/admin/usage_handler.go b/backend/internal/handler/admin/usage_handler.go index 5cbf18e67..d0bba7730 100644 --- a/backend/internal/handler/admin/usage_handler.go +++ b/backend/internal/handler/admin/usage_handler.go @@ -51,6 +51,7 @@ type CreateUsageCleanupTaskRequest struct { AccountID *int64 `json:"account_id"` GroupID *int64 `json:"group_id"` Model *string `json:"model"` + RequestType *string 
`json:"request_type"` Stream *bool `json:"stream"` BillingType *int8 `json:"billing_type"` Timezone string `json:"timezone"` @@ -101,8 +102,17 @@ func (h *UsageHandler) List(c *gin.Context) { model := c.Query("model") + var requestType *int16 var stream *bool - if streamStr := c.Query("stream"); streamStr != "" { + if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" { + parsed, err := service.ParseUsageRequestType(requestTypeStr) + if err != nil { + response.BadRequest(c, err.Error()) + return + } + value := int16(parsed) + requestType = &value + } else if streamStr := c.Query("stream"); streamStr != "" { val, err := strconv.ParseBool(streamStr) if err != nil { response.BadRequest(c, "Invalid stream value, use true or false") @@ -152,6 +162,7 @@ func (h *UsageHandler) List(c *gin.Context) { AccountID: accountID, GroupID: groupID, Model: model, + RequestType: requestType, Stream: stream, BillingType: billingType, StartTime: startTime, @@ -214,8 +225,17 @@ func (h *UsageHandler) Stats(c *gin.Context) { model := c.Query("model") + var requestType *int16 var stream *bool - if streamStr := c.Query("stream"); streamStr != "" { + if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" { + parsed, err := service.ParseUsageRequestType(requestTypeStr) + if err != nil { + response.BadRequest(c, err.Error()) + return + } + value := int16(parsed) + requestType = &value + } else if streamStr := c.Query("stream"); streamStr != "" { val, err := strconv.ParseBool(streamStr) if err != nil { response.BadRequest(c, "Invalid stream value, use true or false") @@ -278,6 +298,7 @@ func (h *UsageHandler) Stats(c *gin.Context) { AccountID: accountID, GroupID: groupID, Model: model, + RequestType: requestType, Stream: stream, BillingType: billingType, StartTime: &startTime, @@ -432,6 +453,19 @@ func (h *UsageHandler) CreateCleanupTask(c *gin.Context) { } endTime = endTime.Add(24*time.Hour - time.Nanosecond) + var requestType 
*int16 + stream := req.Stream + if req.RequestType != nil { + parsed, err := service.ParseUsageRequestType(*req.RequestType) + if err != nil { + response.BadRequest(c, err.Error()) + return + } + value := int16(parsed) + requestType = &value + stream = nil + } + filters := service.UsageCleanupFilters{ StartTime: startTime, EndTime: endTime, @@ -440,7 +474,8 @@ func (h *UsageHandler) CreateCleanupTask(c *gin.Context) { AccountID: req.AccountID, GroupID: req.GroupID, Model: req.Model, - Stream: req.Stream, + RequestType: requestType, + Stream: stream, BillingType: req.BillingType, } @@ -464,9 +499,13 @@ func (h *UsageHandler) CreateCleanupTask(c *gin.Context) { if filters.Model != nil { model = *filters.Model } - var stream any + var streamValue any if filters.Stream != nil { - stream = *filters.Stream + streamValue = *filters.Stream + } + var requestTypeName any + if filters.RequestType != nil { + requestTypeName = service.RequestTypeFromInt16(*filters.RequestType).String() } var billingType any if filters.BillingType != nil { @@ -481,7 +520,7 @@ func (h *UsageHandler) CreateCleanupTask(c *gin.Context) { Body: req, } executeAdminIdempotentJSON(c, "admin.usage.cleanup_tasks.create", idempotencyPayload, service.DefaultWriteIdempotencyTTL(), func(ctx context.Context) (any, error) { - logger.LegacyPrintf("handler.admin.usage", "[UsageCleanup] 请求创建清理任务: operator=%d start=%s end=%s user_id=%v api_key_id=%v account_id=%v group_id=%v model=%v stream=%v billing_type=%v tz=%q", + logger.LegacyPrintf("handler.admin.usage", "[UsageCleanup] 请求创建清理任务: operator=%d start=%s end=%s user_id=%v api_key_id=%v account_id=%v group_id=%v model=%v request_type=%v stream=%v billing_type=%v tz=%q", subject.UserID, filters.StartTime.Format(time.RFC3339), filters.EndTime.Format(time.RFC3339), @@ -490,7 +529,8 @@ func (h *UsageHandler) CreateCleanupTask(c *gin.Context) { accountID, groupID, model, - stream, + requestTypeName, + streamValue, billingType, req.Timezone, ) diff --git 
a/backend/internal/handler/admin/usage_handler_request_type_test.go b/backend/internal/handler/admin/usage_handler_request_type_test.go new file mode 100644 index 000000000..21add574a --- /dev/null +++ b/backend/internal/handler/admin/usage_handler_request_type_test.go @@ -0,0 +1,117 @@ +package admin + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +type adminUsageRepoCapture struct { + service.UsageLogRepository + listFilters usagestats.UsageLogFilters + statsFilters usagestats.UsageLogFilters +} + +func (s *adminUsageRepoCapture) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters usagestats.UsageLogFilters) ([]service.UsageLog, *pagination.PaginationResult, error) { + s.listFilters = filters + return []service.UsageLog{}, &pagination.PaginationResult{ + Total: 0, + Page: params.Page, + PageSize: params.PageSize, + Pages: 0, + }, nil +} + +func (s *adminUsageRepoCapture) GetStatsWithFilters(ctx context.Context, filters usagestats.UsageLogFilters) (*usagestats.UsageStats, error) { + s.statsFilters = filters + return &usagestats.UsageStats{}, nil +} + +func newAdminUsageRequestTypeTestRouter(repo *adminUsageRepoCapture) *gin.Engine { + gin.SetMode(gin.TestMode) + usageSvc := service.NewUsageService(repo, nil, nil, nil) + handler := NewUsageHandler(usageSvc, nil, nil, nil) + router := gin.New() + router.GET("/admin/usage", handler.List) + router.GET("/admin/usage/stats", handler.Stats) + return router +} + +func TestAdminUsageListRequestTypePriority(t *testing.T) { + repo := &adminUsageRepoCapture{} + router := newAdminUsageRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/usage?request_type=ws_v2&stream=false", nil) + rec := httptest.NewRecorder() + 
router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) + require.NotNil(t, repo.listFilters.RequestType) + require.Equal(t, int16(service.RequestTypeWSV2), *repo.listFilters.RequestType) + require.Nil(t, repo.listFilters.Stream) +} + +func TestAdminUsageListInvalidRequestType(t *testing.T) { + repo := &adminUsageRepoCapture{} + router := newAdminUsageRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/usage?request_type=bad", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} + +func TestAdminUsageListInvalidStream(t *testing.T) { + repo := &adminUsageRepoCapture{} + router := newAdminUsageRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/usage?stream=bad", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} + +func TestAdminUsageStatsRequestTypePriority(t *testing.T) { + repo := &adminUsageRepoCapture{} + router := newAdminUsageRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/usage/stats?request_type=stream&stream=bad", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) + require.NotNil(t, repo.statsFilters.RequestType) + require.Equal(t, int16(service.RequestTypeStream), *repo.statsFilters.RequestType) + require.Nil(t, repo.statsFilters.Stream) +} + +func TestAdminUsageStatsInvalidRequestType(t *testing.T) { + repo := &adminUsageRepoCapture{} + router := newAdminUsageRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/admin/usage/stats?request_type=oops", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} + +func TestAdminUsageStatsInvalidStream(t *testing.T) { + repo := &adminUsageRepoCapture{} + router := newAdminUsageRequestTypeTestRouter(repo) + + 
req := httptest.NewRequest(http.MethodGet, "/admin/usage/stats?stream=oops", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} diff --git a/backend/internal/handler/dto/mappers.go b/backend/internal/handler/dto/mappers.go index e399e7b99..17fc65edc 100644 --- a/backend/internal/handler/dto/mappers.go +++ b/backend/internal/handler/dto/mappers.go @@ -385,6 +385,8 @@ func AccountSummaryFromService(a *service.Account) *AccountSummary { func usageLogFromServiceUser(l *service.UsageLog) UsageLog { // 普通用户 DTO:严禁包含管理员字段(例如 account_rate_multiplier、ip_address、account)。 + requestType := l.EffectiveRequestType() + stream, openAIWSMode := service.ApplyLegacyRequestFields(requestType, l.Stream, l.OpenAIWSMode) return UsageLog{ ID: l.ID, UserID: l.UserID, @@ -409,8 +411,9 @@ func usageLogFromServiceUser(l *service.UsageLog) UsageLog { ActualCost: l.ActualCost, RateMultiplier: l.RateMultiplier, BillingType: l.BillingType, - Stream: l.Stream, - OpenAIWSMode: l.OpenAIWSMode, + RequestType: requestType.String(), + Stream: stream, + OpenAIWSMode: openAIWSMode, DurationMs: l.DurationMs, FirstTokenMs: l.FirstTokenMs, ImageCount: l.ImageCount, @@ -465,6 +468,7 @@ func UsageCleanupTaskFromService(task *service.UsageCleanupTask) *UsageCleanupTa AccountID: task.Filters.AccountID, GroupID: task.Filters.GroupID, Model: task.Filters.Model, + RequestType: requestTypeStringPtr(task.Filters.RequestType), Stream: task.Filters.Stream, BillingType: task.Filters.BillingType, }, @@ -480,6 +484,14 @@ func UsageCleanupTaskFromService(task *service.UsageCleanupTask) *UsageCleanupTa } } +func requestTypeStringPtr(requestType *int16) *string { + if requestType == nil { + return nil + } + value := service.RequestTypeFromInt16(*requestType).String() + return &value +} + func SettingFromService(s *service.Setting) *Setting { if s == nil { return nil diff --git a/backend/internal/handler/dto/mappers_usage_test.go 
b/backend/internal/handler/dto/mappers_usage_test.go index 22d910001..d716bdc49 100644 --- a/backend/internal/handler/dto/mappers_usage_test.go +++ b/backend/internal/handler/dto/mappers_usage_test.go @@ -26,3 +26,48 @@ func TestUsageLogFromService_IncludesOpenAIWSMode(t *testing.T) { require.True(t, UsageLogFromServiceAdmin(wsLog).OpenAIWSMode) require.False(t, UsageLogFromServiceAdmin(httpLog).OpenAIWSMode) } + +func TestUsageLogFromService_PrefersRequestTypeForLegacyFields(t *testing.T) { + t.Parallel() + + log := &service.UsageLog{ + RequestID: "req_2", + Model: "gpt-5.3-codex", + RequestType: service.RequestTypeWSV2, + Stream: false, + OpenAIWSMode: false, + } + + userDTO := UsageLogFromService(log) + adminDTO := UsageLogFromServiceAdmin(log) + + require.Equal(t, "ws_v2", userDTO.RequestType) + require.True(t, userDTO.Stream) + require.True(t, userDTO.OpenAIWSMode) + require.Equal(t, "ws_v2", adminDTO.RequestType) + require.True(t, adminDTO.Stream) + require.True(t, adminDTO.OpenAIWSMode) +} + +func TestUsageCleanupTaskFromService_RequestTypeMapping(t *testing.T) { + t.Parallel() + + requestType := int16(service.RequestTypeStream) + task := &service.UsageCleanupTask{ + ID: 1, + Status: service.UsageCleanupStatusPending, + Filters: service.UsageCleanupFilters{ + RequestType: &requestType, + }, + } + + dtoTask := UsageCleanupTaskFromService(task) + require.NotNil(t, dtoTask) + require.NotNil(t, dtoTask.Filters.RequestType) + require.Equal(t, "stream", *dtoTask.Filters.RequestType) +} + +func TestRequestTypeStringPtrNil(t *testing.T) { + t.Parallel() + require.Nil(t, requestTypeStringPtr(nil)) +} diff --git a/backend/internal/handler/dto/types.go b/backend/internal/handler/dto/types.go index c95a84899..56b986257 100644 --- a/backend/internal/handler/dto/types.go +++ b/backend/internal/handler/dto/types.go @@ -278,11 +278,12 @@ type UsageLog struct { ActualCost float64 `json:"actual_cost"` RateMultiplier float64 `json:"rate_multiplier"` - BillingType int8 
`json:"billing_type"` - Stream bool `json:"stream"` - OpenAIWSMode bool `json:"openai_ws_mode"` - DurationMs *int `json:"duration_ms"` - FirstTokenMs *int `json:"first_token_ms"` + BillingType int8 `json:"billing_type"` + RequestType string `json:"request_type"` + Stream bool `json:"stream"` + OpenAIWSMode bool `json:"openai_ws_mode"` + DurationMs *int `json:"duration_ms"` + FirstTokenMs *int `json:"first_token_ms"` // 图片生成字段 ImageCount int `json:"image_count"` @@ -325,6 +326,7 @@ type UsageCleanupFilters struct { AccountID *int64 `json:"account_id,omitempty"` GroupID *int64 `json:"group_id,omitempty"` Model *string `json:"model,omitempty"` + RequestType *string `json:"request_type,omitempty"` Stream *bool `json:"stream,omitempty"` BillingType *int8 `json:"billing_type,omitempty"` } diff --git a/backend/internal/handler/sora_gateway_handler_test.go b/backend/internal/handler/sora_gateway_handler_test.go index cc792350b..01c684ca2 100644 --- a/backend/internal/handler/sora_gateway_handler_test.go +++ b/backend/internal/handler/sora_gateway_handler_test.go @@ -314,10 +314,10 @@ func (s *stubUsageLogRepo) GetAccountTodayStats(ctx context.Context, accountID i func (s *stubUsageLogRepo) GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) { return nil, nil } -func (s *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) { +func (s *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, requestType *int16, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) { return nil, nil } -func (s *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream 
*bool, billingType *int8) ([]usagestats.ModelStat, error) { +func (s *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, requestType *int16, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) { return nil, nil } func (s *stubUsageLogRepo) GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error) { diff --git a/backend/internal/handler/usage_handler.go b/backend/internal/handler/usage_handler.go index b8182dad1..2bd0e0d7b 100644 --- a/backend/internal/handler/usage_handler.go +++ b/backend/internal/handler/usage_handler.go @@ -2,6 +2,7 @@ package handler import ( "strconv" + "strings" "time" "github.com/Wei-Shaw/sub2api/internal/handler/dto" @@ -65,8 +66,17 @@ func (h *UsageHandler) List(c *gin.Context) { // Parse additional filters model := c.Query("model") + var requestType *int16 var stream *bool - if streamStr := c.Query("stream"); streamStr != "" { + if requestTypeStr := strings.TrimSpace(c.Query("request_type")); requestTypeStr != "" { + parsed, err := service.ParseUsageRequestType(requestTypeStr) + if err != nil { + response.BadRequest(c, err.Error()) + return + } + value := int16(parsed) + requestType = &value + } else if streamStr := c.Query("stream"); streamStr != "" { val, err := strconv.ParseBool(streamStr) if err != nil { response.BadRequest(c, "Invalid stream value, use true or false") @@ -114,6 +124,7 @@ func (h *UsageHandler) List(c *gin.Context) { UserID: subject.UserID, // Always filter by current user for security APIKeyID: apiKeyID, Model: model, + RequestType: requestType, Stream: stream, BillingType: billingType, StartTime: startTime, diff --git a/backend/internal/handler/usage_handler_request_type_test.go b/backend/internal/handler/usage_handler_request_type_test.go new file mode 100644 index 000000000..7c4c79135 --- /dev/null +++ 
b/backend/internal/handler/usage_handler_request_type_test.go @@ -0,0 +1,80 @@ +package handler + +import ( + "context" + "net/http" + "net/http/httptest" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/gin-gonic/gin" + "github.com/stretchr/testify/require" +) + +type userUsageRepoCapture struct { + service.UsageLogRepository + listFilters usagestats.UsageLogFilters +} + +func (s *userUsageRepoCapture) ListWithFilters(ctx context.Context, params pagination.PaginationParams, filters usagestats.UsageLogFilters) ([]service.UsageLog, *pagination.PaginationResult, error) { + s.listFilters = filters + return []service.UsageLog{}, &pagination.PaginationResult{ + Total: 0, + Page: params.Page, + PageSize: params.PageSize, + Pages: 0, + }, nil +} + +func newUserUsageRequestTypeTestRouter(repo *userUsageRepoCapture) *gin.Engine { + gin.SetMode(gin.TestMode) + usageSvc := service.NewUsageService(repo, nil, nil, nil) + handler := NewUsageHandler(usageSvc, nil) + router := gin.New() + router.Use(func(c *gin.Context) { + c.Set(string(middleware2.ContextKeyUser), middleware2.AuthSubject{UserID: 42}) + c.Next() + }) + router.GET("/usage", handler.List) + return router +} + +func TestUserUsageListRequestTypePriority(t *testing.T) { + repo := &userUsageRepoCapture{} + router := newUserUsageRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/usage?request_type=ws_v2&stream=bad", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) + require.Equal(t, int64(42), repo.listFilters.UserID) + require.NotNil(t, repo.listFilters.RequestType) + require.Equal(t, int16(service.RequestTypeWSV2), *repo.listFilters.RequestType) + require.Nil(t, repo.listFilters.Stream) +} + +func 
TestUserUsageListInvalidRequestType(t *testing.T) { + repo := &userUsageRepoCapture{} + router := newUserUsageRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/usage?request_type=invalid", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} + +func TestUserUsageListInvalidStream(t *testing.T) { + repo := &userUsageRepoCapture{} + router := newUserUsageRequestTypeTestRouter(repo) + + req := httptest.NewRequest(http.MethodGet, "/usage?stream=invalid", nil) + rec := httptest.NewRecorder() + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusBadRequest, rec.Code) +} diff --git a/backend/internal/pkg/usagestats/usage_log_types.go b/backend/internal/pkg/usagestats/usage_log_types.go index 2f6c7fe0b..5f4e13f54 100644 --- a/backend/internal/pkg/usagestats/usage_log_types.go +++ b/backend/internal/pkg/usagestats/usage_log_types.go @@ -139,6 +139,7 @@ type UsageLogFilters struct { AccountID int64 GroupID int64 Model string + RequestType *int16 Stream *bool BillingType *int8 StartTime *time.Time diff --git a/backend/internal/repository/migrations_schema_integration_test.go b/backend/internal/repository/migrations_schema_integration_test.go index e94aa9054..72422d18a 100644 --- a/backend/internal/repository/migrations_schema_integration_test.go +++ b/backend/internal/repository/migrations_schema_integration_test.go @@ -42,6 +42,7 @@ func TestMigrationsRunner_IsIdempotent_AndSchemaIsUpToDate(t *testing.T) { // usage_logs: billing_type used by filters/stats requireColumn(t, tx, "usage_logs", "billing_type", "smallint", 0, false) + requireColumn(t, tx, "usage_logs", "request_type", "smallint", 0, false) requireColumn(t, tx, "usage_logs", "openai_ws_mode", "boolean", 0, false) // settings table should exist diff --git a/backend/internal/repository/usage_cleanup_repo.go b/backend/internal/repository/usage_cleanup_repo.go index 9c0213573..30e70569d 100644 --- 
a/backend/internal/repository/usage_cleanup_repo.go +++ b/backend/internal/repository/usage_cleanup_repo.go @@ -362,7 +362,11 @@ func buildUsageCleanupWhere(filters service.UsageCleanupFilters) (string, []any) idx++ } } - if filters.Stream != nil { + if filters.RequestType != nil { + conditions = append(conditions, fmt.Sprintf("request_type = $%d", idx)) + args = append(args, *filters.RequestType) + idx++ + } else if filters.Stream != nil { conditions = append(conditions, fmt.Sprintf("stream = $%d", idx)) args = append(args, *filters.Stream) idx++ diff --git a/backend/internal/repository/usage_cleanup_repo_test.go b/backend/internal/repository/usage_cleanup_repo_test.go index 0ca30ec7d..788f2fd31 100644 --- a/backend/internal/repository/usage_cleanup_repo_test.go +++ b/backend/internal/repository/usage_cleanup_repo_test.go @@ -466,6 +466,23 @@ func TestBuildUsageCleanupWhere(t *testing.T) { require.Equal(t, []any{start, end, userID, apiKeyID, accountID, groupID, "gpt-4", stream, billingType}, args) } +func TestBuildUsageCleanupWhereRequestTypePriority(t *testing.T) { + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + requestType := int16(service.RequestTypeWSV2) + stream := false + + where, args := buildUsageCleanupWhere(service.UsageCleanupFilters{ + StartTime: start, + EndTime: end, + RequestType: &requestType, + Stream: &stream, + }) + + require.Equal(t, "created_at >= $1 AND created_at <= $2 AND request_type = $3", where) + require.Equal(t, []any{start, end, requestType}, args) +} + func TestBuildUsageCleanupWhereModelEmpty(t *testing.T) { start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) end := start.Add(24 * time.Hour) diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go index ddad79363..18a782ab9 100644 --- a/backend/internal/repository/usage_log_repo.go +++ b/backend/internal/repository/usage_log_repo.go @@ -22,7 +22,7 @@ import ( "github.com/lib/pq" ) -const 
usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, account_rate_multiplier, billing_type, stream, openai_ws_mode, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, media_type, reasoning_effort, cache_ttl_overridden, created_at" +const usageLogSelectColumns = "id, user_id, api_key_id, account_id, request_id, model, group_id, subscription_id, input_tokens, output_tokens, cache_creation_tokens, cache_read_tokens, cache_creation_5m_tokens, cache_creation_1h_tokens, input_cost, output_cost, cache_creation_cost, cache_read_cost, total_cost, actual_cost, rate_multiplier, account_rate_multiplier, billing_type, request_type, stream, openai_ws_mode, duration_ms, first_token_ms, user_agent, ip_address, image_count, image_size, media_type, reasoning_effort, cache_ttl_overridden, created_at" // dateFormatWhitelist 将 granularity 参数映射为 PostgreSQL TO_CHAR 格式字符串,防止外部输入直接拼入 SQL var dateFormatWhitelist = map[string]string{ @@ -98,6 +98,8 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) log.RequestID = requestID rateMultiplier := log.RateMultiplier + log.SyncRequestTypeAndLegacyFields() + requestType := int16(log.RequestType) query := ` INSERT INTO usage_logs ( @@ -123,6 +125,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) rate_multiplier, account_rate_multiplier, billing_type, + request_type, stream, openai_ws_mode, duration_ms, @@ -141,7 +144,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, - $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34 + $20, $21, $22, $23, $24, $25, $26, $27, $28, 
$29, $30, $31, $32, $33, $34, $35 ) ON CONFLICT (request_id, api_key_id) DO NOTHING RETURNING id, created_at @@ -185,6 +188,7 @@ func (r *usageLogRepository) Create(ctx context.Context, log *service.UsageLog) rateMultiplier, log.AccountRateMultiplier, log.BillingType, + requestType, log.Stream, log.OpenAIWSMode, duration, @@ -1401,7 +1405,10 @@ func (r *usageLogRepository) ListWithFilters(ctx context.Context, params paginat conditions = append(conditions, fmt.Sprintf("model = $%d", len(args)+1)) args = append(args, filters.Model) } - if filters.Stream != nil { + if filters.RequestType != nil { + conditions = append(conditions, fmt.Sprintf("request_type = $%d", len(args)+1)) + args = append(args, *filters.RequestType) + } else if filters.Stream != nil { conditions = append(conditions, fmt.Sprintf("stream = $%d", len(args)+1)) args = append(args, *filters.Stream) } @@ -1600,7 +1607,7 @@ func (r *usageLogRepository) GetBatchAPIKeyUsageStats(ctx context.Context, apiKe } // GetUsageTrendWithFilters returns usage trend data with optional filters -func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) (results []TrendDataPoint, err error) { +func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, requestType *int16, stream *bool, billingType *int8) (results []TrendDataPoint, err error) { dateFormat := safeDateFormat(granularity) query := fmt.Sprintf(` @@ -1638,7 +1645,10 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start query += fmt.Sprintf(" AND model = $%d", len(args)+1) args = append(args, model) } - if stream != nil { + if requestType != nil { + query += fmt.Sprintf(" AND request_type = $%d", len(args)+1) + args = append(args, *requestType) + } else if 
stream != nil { query += fmt.Sprintf(" AND stream = $%d", len(args)+1) args = append(args, *stream) } @@ -1669,7 +1679,7 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start } // GetModelStatsWithFilters returns model statistics with optional filters -func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) (results []ModelStat, err error) { +func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, requestType *int16, stream *bool, billingType *int8) (results []ModelStat, err error) { actualCostExpr := "COALESCE(SUM(actual_cost), 0) as actual_cost" // 当仅按 account_id 聚合时,实际费用使用账号倍率(total_cost * account_rate_multiplier)。 if accountID > 0 && userID == 0 && apiKeyID == 0 { @@ -1706,7 +1716,10 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start query += fmt.Sprintf(" AND group_id = $%d", len(args)+1) args = append(args, groupID) } - if stream != nil { + if requestType != nil { + query += fmt.Sprintf(" AND request_type = $%d", len(args)+1) + args = append(args, *requestType) + } else if stream != nil { query += fmt.Sprintf(" AND stream = $%d", len(args)+1) args = append(args, *stream) } @@ -1796,7 +1809,10 @@ func (r *usageLogRepository) GetStatsWithFilters(ctx context.Context, filters Us conditions = append(conditions, fmt.Sprintf("model = $%d", len(args)+1)) args = append(args, filters.Model) } - if filters.Stream != nil { + if filters.RequestType != nil { + conditions = append(conditions, fmt.Sprintf("request_type = $%d", len(args)+1)) + args = append(args, *filters.RequestType) + } else if filters.Stream != nil { conditions = append(conditions, fmt.Sprintf("stream = $%d", len(args)+1)) args = append(args, *filters.Stream) } @@ -2019,7 +2035,7 @@ func (r *usageLogRepository) 
GetAccountUsageStats(ctx context.Context, accountID } } - models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil, nil) + models, err := r.GetModelStatsWithFilters(ctx, startTime, endTime, 0, 0, accountID, 0, nil, nil, nil) if err != nil { models = []ModelStat{} } @@ -2269,6 +2285,7 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e rateMultiplier float64 accountRateMultiplier sql.NullFloat64 billingType int16 + requestTypeRaw int16 stream bool openaiWSMode bool durationMs sql.NullInt64 @@ -2307,6 +2324,7 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e &rateMultiplier, &accountRateMultiplier, &billingType, + &requestTypeRaw, &stream, &openaiWSMode, &durationMs, @@ -2344,12 +2362,16 @@ func scanUsageLog(scanner interface{ Scan(...any) error }) (*service.UsageLog, e RateMultiplier: rateMultiplier, AccountRateMultiplier: nullFloat64Ptr(accountRateMultiplier), BillingType: int8(billingType), - Stream: stream, - OpenAIWSMode: openaiWSMode, + RequestType: service.RequestTypeFromInt16(requestTypeRaw), ImageCount: imageCount, CacheTTLOverridden: cacheTTLOverridden, CreatedAt: createdAt, } + // 先回填 legacy 字段,再基于 legacy + request_type 计算最终请求类型,保证历史数据兼容。 + log.Stream = stream + log.OpenAIWSMode = openaiWSMode + log.RequestType = log.EffectiveRequestType() + log.Stream, log.OpenAIWSMode = service.ApplyLegacyRequestFields(log.RequestType, stream, openaiWSMode) if requestID.Valid { log.RequestID = requestID.String diff --git a/backend/internal/repository/usage_log_repo_integration_test.go b/backend/internal/repository/usage_log_repo_integration_test.go index 1947fb6e1..4d50f7de4 100644 --- a/backend/internal/repository/usage_log_repo_integration_test.go +++ b/backend/internal/repository/usage_log_repo_integration_test.go @@ -156,6 +156,36 @@ func (s *UsageLogRepoSuite) TestGetByID_ReturnsOpenAIWSMode() { s.Require().True(got.OpenAIWSMode) } +func (s *UsageLogRepoSuite) 
TestGetByID_ReturnsRequestTypeAndLegacyFallback() { + user := mustCreateUser(s.T(), s.client, &service.User{Email: "getbyid-request-type@test.com"}) + apiKey := mustCreateApiKey(s.T(), s.client, &service.APIKey{UserID: user.ID, Key: "sk-getbyid-request-type", Name: "k"}) + account := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-getbyid-request-type"}) + + log := &service.UsageLog{ + UserID: user.ID, + APIKeyID: apiKey.ID, + AccountID: account.ID, + RequestID: uuid.New().String(), + Model: "gpt-5.3-codex", + RequestType: service.RequestTypeWSV2, + Stream: true, + OpenAIWSMode: false, + InputTokens: 10, + OutputTokens: 20, + TotalCost: 1.0, + ActualCost: 1.0, + CreatedAt: timezone.Today().Add(4 * time.Hour), + } + _, err := s.repo.Create(s.ctx, log) + s.Require().NoError(err) + + got, err := s.repo.GetByID(s.ctx, log.ID) + s.Require().NoError(err) + s.Require().Equal(service.RequestTypeWSV2, got.RequestType) + s.Require().True(got.Stream) + s.Require().True(got.OpenAIWSMode) +} + // --- Delete --- func (s *UsageLogRepoSuite) TestDelete() { @@ -970,17 +1000,17 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters() { endTime := base.Add(48 * time.Hour) // Test with user filter - trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil, nil) + trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, 0, 0, 0, "", nil, nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters user filter") s.Require().Len(trend, 2) // Test with apiKey filter - trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil, nil) + trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", 0, apiKey.ID, 0, 0, "", nil, nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters apiKey filter") s.Require().Len(trend, 2) // Test with both filters - trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, 
"day", user.ID, apiKey.ID, 0, 0, "", nil, nil) + trend, err = s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "day", user.ID, apiKey.ID, 0, 0, "", nil, nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters both filters") s.Require().Len(trend, 2) } @@ -997,7 +1027,7 @@ func (s *UsageLogRepoSuite) TestGetUsageTrendWithFilters_HourlyGranularity() { startTime := base.Add(-1 * time.Hour) endTime := base.Add(3 * time.Hour) - trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil, nil) + trend, err := s.repo.GetUsageTrendWithFilters(s.ctx, startTime, endTime, "hour", user.ID, 0, 0, 0, "", nil, nil, nil) s.Require().NoError(err, "GetUsageTrendWithFilters hourly") s.Require().Len(trend, 2) } @@ -1043,17 +1073,17 @@ func (s *UsageLogRepoSuite) TestGetModelStatsWithFilters() { endTime := base.Add(2 * time.Hour) // Test with user filter - stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil, nil) + stats, err := s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, user.ID, 0, 0, 0, nil, nil, nil) s.Require().NoError(err, "GetModelStatsWithFilters user filter") s.Require().Len(stats, 2) // Test with apiKey filter - stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil, nil) + stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, apiKey.ID, 0, 0, nil, nil, nil) s.Require().NoError(err, "GetModelStatsWithFilters apiKey filter") s.Require().Len(stats, 2) // Test with account filter - stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil, nil) + stats, err = s.repo.GetModelStatsWithFilters(s.ctx, startTime, endTime, 0, 0, account.ID, 0, nil, nil, nil) s.Require().NoError(err, "GetModelStatsWithFilters account filter") s.Require().Len(stats, 2) } diff --git a/backend/internal/repository/usage_log_repo_request_type_test.go 
b/backend/internal/repository/usage_log_repo_request_type_test.go new file mode 100644 index 000000000..33082c046 --- /dev/null +++ b/backend/internal/repository/usage_log_repo_request_type_test.go @@ -0,0 +1,285 @@ +package repository + +import ( + "context" + "database/sql" + "fmt" + "reflect" + "testing" + "time" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func TestUsageLogRepositoryCreateSyncRequestTypeAndLegacyFields(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageLogRepository{sql: db} + + createdAt := time.Date(2025, 1, 1, 12, 0, 0, 0, time.UTC) + log := &service.UsageLog{ + UserID: 1, + APIKeyID: 2, + AccountID: 3, + RequestID: "req-1", + Model: "gpt-5", + InputTokens: 10, + OutputTokens: 20, + TotalCost: 1, + ActualCost: 1, + BillingType: service.BillingTypeBalance, + RequestType: service.RequestTypeWSV2, + Stream: false, + OpenAIWSMode: false, + CreatedAt: createdAt, + } + + mock.ExpectQuery("INSERT INTO usage_logs"). 
+ WithArgs( + log.UserID, + log.APIKeyID, + log.AccountID, + log.RequestID, + log.Model, + sqlmock.AnyArg(), // group_id + sqlmock.AnyArg(), // subscription_id + log.InputTokens, + log.OutputTokens, + log.CacheCreationTokens, + log.CacheReadTokens, + log.CacheCreation5mTokens, + log.CacheCreation1hTokens, + log.InputCost, + log.OutputCost, + log.CacheCreationCost, + log.CacheReadCost, + log.TotalCost, + log.ActualCost, + log.RateMultiplier, + log.AccountRateMultiplier, + log.BillingType, + int16(service.RequestTypeWSV2), + true, + true, + sqlmock.AnyArg(), // duration_ms + sqlmock.AnyArg(), // first_token_ms + sqlmock.AnyArg(), // user_agent + sqlmock.AnyArg(), // ip_address + log.ImageCount, + sqlmock.AnyArg(), // image_size + sqlmock.AnyArg(), // media_type + sqlmock.AnyArg(), // reasoning_effort + log.CacheTTLOverridden, + createdAt, + ). + WillReturnRows(sqlmock.NewRows([]string{"id", "created_at"}).AddRow(int64(99), createdAt)) + + inserted, err := repo.Create(context.Background(), log) + require.NoError(t, err) + require.True(t, inserted) + require.Equal(t, int64(99), log.ID) + require.Equal(t, service.RequestTypeWSV2, log.RequestType) + require.True(t, log.Stream) + require.True(t, log.OpenAIWSMode) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageLogRepositoryListWithFiltersRequestTypePriority(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageLogRepository{sql: db} + + requestType := int16(service.RequestTypeWSV2) + stream := false + filters := usagestats.UsageLogFilters{ + RequestType: &requestType, + Stream: &stream, + } + + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_logs WHERE request_type = \\$1"). + WithArgs(requestType). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(0))) + mock.ExpectQuery("SELECT .* FROM usage_logs WHERE request_type = \\$1 ORDER BY id DESC LIMIT \\$2 OFFSET \\$3"). + WithArgs(requestType, 20, 0). 
+ WillReturnRows(sqlmock.NewRows([]string{"id"})) + + logs, page, err := repo.ListWithFilters(context.Background(), pagination.PaginationParams{Page: 1, PageSize: 20}, filters) + require.NoError(t, err) + require.Empty(t, logs) + require.NotNil(t, page) + require.Equal(t, int64(0), page.Total) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageLogRepositoryGetUsageTrendWithFiltersRequestTypePriority(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageLogRepository{sql: db} + + start := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + requestType := int16(service.RequestTypeStream) + stream := true + + mock.ExpectQuery("AND request_type = \\$3"). + WithArgs(start, end, requestType). + WillReturnRows(sqlmock.NewRows([]string{"date", "requests", "input_tokens", "output_tokens", "cache_tokens", "total_tokens", "cost", "actual_cost"})) + + trend, err := repo.GetUsageTrendWithFilters(context.Background(), start, end, "day", 0, 0, 0, 0, "", &requestType, &stream, nil) + require.NoError(t, err) + require.Empty(t, trend) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageLogRepositoryGetModelStatsWithFiltersRequestTypePriority(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageLogRepository{sql: db} + + start := time.Date(2025, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + requestType := int16(service.RequestTypeWSV2) + stream := false + + mock.ExpectQuery("AND request_type = \\$3"). + WithArgs(start, end, requestType). 
+ WillReturnRows(sqlmock.NewRows([]string{"model", "requests", "input_tokens", "output_tokens", "total_tokens", "cost", "actual_cost"})) + + stats, err := repo.GetModelStatsWithFilters(context.Background(), start, end, 0, 0, 0, 0, &requestType, &stream, nil) + require.NoError(t, err) + require.Empty(t, stats) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestUsageLogRepositoryGetStatsWithFiltersRequestTypePriority(t *testing.T) { + db, mock := newSQLMock(t) + repo := &usageLogRepository{sql: db} + + requestType := int16(service.RequestTypeSync) + stream := true + filters := usagestats.UsageLogFilters{ + RequestType: &requestType, + Stream: &stream, + } + + mock.ExpectQuery("FROM usage_logs\\s+WHERE request_type = \\$1"). + WithArgs(requestType). + WillReturnRows(sqlmock.NewRows([]string{ + "total_requests", + "total_input_tokens", + "total_output_tokens", + "total_cache_tokens", + "total_cost", + "total_actual_cost", + "total_account_cost", + "avg_duration_ms", + }).AddRow(int64(1), int64(2), int64(3), int64(4), 1.2, 1.0, 1.2, 20.0)) + + stats, err := repo.GetStatsWithFilters(context.Background(), filters) + require.NoError(t, err) + require.Equal(t, int64(1), stats.TotalRequests) + require.Equal(t, int64(9), stats.TotalTokens) + require.NoError(t, mock.ExpectationsWereMet()) +} + +type usageLogScannerStub struct { + values []any +} + +func (s usageLogScannerStub) Scan(dest ...any) error { + if len(dest) != len(s.values) { + return fmt.Errorf("scan arg count mismatch: got %d want %d", len(dest), len(s.values)) + } + for i := range dest { + dv := reflect.ValueOf(dest[i]) + if dv.Kind() != reflect.Ptr { + return fmt.Errorf("dest[%d] is not pointer", i) + } + dv.Elem().Set(reflect.ValueOf(s.values[i])) + } + return nil +} + +func TestScanUsageLogRequestTypeAndLegacyFallback(t *testing.T) { + t.Run("request_type_ws_v2_overrides_legacy", func(t *testing.T) { + now := time.Now().UTC() + log, err := scanUsageLog(usageLogScannerStub{values: []any{ + int64(1), 
// id + int64(10), // user_id + int64(20), // api_key_id + int64(30), // account_id + sql.NullString{Valid: true, String: "req-1"}, + "gpt-5", // model + sql.NullInt64{}, // group_id + sql.NullInt64{}, // subscription_id + 1, // input_tokens + 2, // output_tokens + 3, // cache_creation_tokens + 4, // cache_read_tokens + 5, // cache_creation_5m_tokens + 6, // cache_creation_1h_tokens + 0.1, // input_cost + 0.2, // output_cost + 0.3, // cache_creation_cost + 0.4, // cache_read_cost + 1.0, // total_cost + 0.9, // actual_cost + 1.0, // rate_multiplier + sql.NullFloat64{}, // account_rate_multiplier + int16(service.BillingTypeBalance), + int16(service.RequestTypeWSV2), + false, // legacy stream + false, // legacy openai ws + sql.NullInt64{}, + sql.NullInt64{}, + sql.NullString{}, + sql.NullString{}, + 0, + sql.NullString{}, + sql.NullString{}, + sql.NullString{}, + false, + now, + }}) + require.NoError(t, err) + require.Equal(t, service.RequestTypeWSV2, log.RequestType) + require.True(t, log.Stream) + require.True(t, log.OpenAIWSMode) + }) + + t.Run("request_type_unknown_falls_back_to_legacy", func(t *testing.T) { + now := time.Now().UTC() + log, err := scanUsageLog(usageLogScannerStub{values: []any{ + int64(2), + int64(11), + int64(21), + int64(31), + sql.NullString{Valid: true, String: "req-2"}, + "gpt-5", + sql.NullInt64{}, + sql.NullInt64{}, + 1, 2, 3, 4, 5, 6, + 0.1, 0.2, 0.3, 0.4, 1.0, 0.9, + 1.0, + sql.NullFloat64{}, + int16(service.BillingTypeBalance), + int16(service.RequestTypeUnknown), + true, + false, + sql.NullInt64{}, + sql.NullInt64{}, + sql.NullString{}, + sql.NullString{}, + 0, + sql.NullString{}, + sql.NullString{}, + sql.NullString{}, + false, + now, + }}) + require.NoError(t, err) + require.Equal(t, service.RequestTypeStream, log.RequestType) + require.True(t, log.Stream) + require.False(t, log.OpenAIWSMode) + }) +} diff --git a/backend/internal/server/api_contract_test.go b/backend/internal/server/api_contract_test.go index 76897bc15..86e19429e 
100644 --- a/backend/internal/server/api_contract_test.go +++ b/backend/internal/server/api_contract_test.go @@ -1555,11 +1555,11 @@ func (r *stubUsageLogRepo) GetDashboardStats(ctx context.Context) (*usagestats.D return nil, errors.New("not implemented") } -func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) { +func (r *stubUsageLogRepo) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, requestType *int16, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) { return nil, errors.New("not implemented") } -func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) { +func (r *stubUsageLogRepo) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, requestType *int16, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) { return nil, errors.New("not implemented") } diff --git a/backend/internal/service/account_usage_service.go b/backend/internal/service/account_usage_service.go index a363a7901..35d369653 100644 --- a/backend/internal/service/account_usage_service.go +++ b/backend/internal/service/account_usage_service.go @@ -33,8 +33,8 @@ type UsageLogRepository interface { // Admin dashboard stats GetDashboardStats(ctx context.Context) (*usagestats.DashboardStats, error) - GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) - GetModelStatsWithFilters(ctx 
context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) + GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, requestType *int16, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) + GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, requestType *int16, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) GetAPIKeyUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.APIKeyUsageTrendPoint, error) GetUserUsageTrend(ctx context.Context, startTime, endTime time.Time, granularity string, limit int) ([]usagestats.UserUsageTrendPoint, error) GetBatchUserUsageStats(ctx context.Context, userIDs []int64, startTime, endTime time.Time) (map[int64]*usagestats.BatchUserUsageStats, error) @@ -297,7 +297,7 @@ func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Accou } dayStart := geminiDailyWindowStart(now) - stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, account.ID, 0, nil, nil) + stats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, dayStart, now, 0, 0, account.ID, 0, nil, nil, nil) if err != nil { return nil, fmt.Errorf("get gemini usage stats failed: %w", err) } @@ -319,7 +319,7 @@ func (s *AccountUsageService) getGeminiUsage(ctx context.Context, account *Accou // Minute window (RPM) - fixed-window approximation: current minute [truncate(now), truncate(now)+1m) minuteStart := now.Truncate(time.Minute) minuteResetAt := minuteStart.Add(time.Minute) - minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID, 0, nil, nil) + minuteStats, err := s.usageLogRepo.GetModelStatsWithFilters(ctx, minuteStart, now, 0, 0, account.ID, 0, 
nil, nil, nil) if err != nil { return nil, fmt.Errorf("get gemini minute usage stats failed: %w", err) } diff --git a/backend/internal/service/dashboard_service.go b/backend/internal/service/dashboard_service.go index 9aab10d20..4528def3d 100644 --- a/backend/internal/service/dashboard_service.go +++ b/backend/internal/service/dashboard_service.go @@ -124,16 +124,16 @@ func (s *DashboardService) GetDashboardStats(ctx context.Context) (*usagestats.D return stats, nil } -func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) { - trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, stream, billingType) +func (s *DashboardService) GetUsageTrendWithFilters(ctx context.Context, startTime, endTime time.Time, granularity string, userID, apiKeyID, accountID, groupID int64, model string, requestType *int16, stream *bool, billingType *int8) ([]usagestats.TrendDataPoint, error) { + trend, err := s.usageRepo.GetUsageTrendWithFilters(ctx, startTime, endTime, granularity, userID, apiKeyID, accountID, groupID, model, requestType, stream, billingType) if err != nil { return nil, fmt.Errorf("get usage trend with filters: %w", err) } return trend, nil } -func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, stream *bool, billingType *int8) ([]usagestats.ModelStat, error) { - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, accountID, groupID, stream, billingType) +func (s *DashboardService) GetModelStatsWithFilters(ctx context.Context, startTime, endTime time.Time, userID, apiKeyID, accountID, groupID int64, requestType *int16, stream *bool, billingType *int8) 
([]usagestats.ModelStat, error) { + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, startTime, endTime, userID, apiKeyID, accountID, groupID, requestType, stream, billingType) if err != nil { return nil, fmt.Errorf("get model stats with filters: %w", err) } diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go index fcc7c4a0c..812e059a9 100644 --- a/backend/internal/service/ratelimit_service.go +++ b/backend/internal/service/ratelimit_service.go @@ -225,7 +225,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, start := geminiDailyWindowStart(now) totals, ok := s.getGeminiUsageTotals(account.ID, start, now) if !ok { - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil, nil) + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil, nil, nil) if err != nil { return true, err } @@ -272,7 +272,7 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, if limit > 0 { start := now.Truncate(time.Minute) - stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil, nil) + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, now, 0, 0, account.ID, 0, nil, nil, nil) if err != nil { return true, err } diff --git a/backend/internal/service/usage_cleanup.go b/backend/internal/service/usage_cleanup.go index 7e3ffbb95..6e32f3c08 100644 --- a/backend/internal/service/usage_cleanup.go +++ b/backend/internal/service/usage_cleanup.go @@ -33,6 +33,7 @@ type UsageCleanupFilters struct { AccountID *int64 `json:"account_id,omitempty"` GroupID *int64 `json:"group_id,omitempty"` Model *string `json:"model,omitempty"` + RequestType *int16 `json:"request_type,omitempty"` Stream *bool `json:"stream,omitempty"` BillingType *int8 `json:"billing_type,omitempty"` } diff --git a/backend/internal/service/usage_cleanup_service.go 
b/backend/internal/service/usage_cleanup_service.go index ee795aa4c..5600542e2 100644 --- a/backend/internal/service/usage_cleanup_service.go +++ b/backend/internal/service/usage_cleanup_service.go @@ -68,6 +68,9 @@ func describeUsageCleanupFilters(filters UsageCleanupFilters) string { if filters.Model != nil { parts = append(parts, "model="+strings.TrimSpace(*filters.Model)) } + if filters.RequestType != nil { + parts = append(parts, "request_type="+RequestTypeFromInt16(*filters.RequestType).String()) + } if filters.Stream != nil { parts = append(parts, fmt.Sprintf("stream=%t", *filters.Stream)) } @@ -368,6 +371,16 @@ func sanitizeUsageCleanupFilters(filters *UsageCleanupFilters) { filters.Model = &model } } + if filters.RequestType != nil { + requestType := RequestType(*filters.RequestType) + if !requestType.IsValid() { + filters.RequestType = nil + } else { + value := int16(requestType.Normalize()) + filters.RequestType = &value + filters.Stream = nil + } + } if filters.BillingType != nil && *filters.BillingType < 0 { filters.BillingType = nil } diff --git a/backend/internal/service/usage_cleanup_service_test.go b/backend/internal/service/usage_cleanup_service_test.go index 1f9f47761..0fdbfd47f 100644 --- a/backend/internal/service/usage_cleanup_service_test.go +++ b/backend/internal/service/usage_cleanup_service_test.go @@ -257,6 +257,53 @@ func TestUsageCleanupServiceCreateTaskSanitizeFilters(t *testing.T) { require.Equal(t, int64(9), task.CreatedBy) } +func TestSanitizeUsageCleanupFiltersRequestTypePriority(t *testing.T) { + requestType := int16(RequestTypeWSV2) + stream := false + model := " gpt-5 " + filters := UsageCleanupFilters{ + Model: &model, + RequestType: &requestType, + Stream: &stream, + } + + sanitizeUsageCleanupFilters(&filters) + + require.NotNil(t, filters.RequestType) + require.Equal(t, int16(RequestTypeWSV2), *filters.RequestType) + require.Nil(t, filters.Stream) + require.NotNil(t, filters.Model) + require.Equal(t, "gpt-5", *filters.Model) 
+} + +func TestSanitizeUsageCleanupFiltersInvalidRequestType(t *testing.T) { + requestType := int16(99) + stream := true + filters := UsageCleanupFilters{ + RequestType: &requestType, + Stream: &stream, + } + + sanitizeUsageCleanupFilters(&filters) + + require.Nil(t, filters.RequestType) + require.NotNil(t, filters.Stream) + require.True(t, *filters.Stream) +} + +func TestDescribeUsageCleanupFiltersIncludesRequestType(t *testing.T) { + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + requestType := int16(RequestTypeWSV2) + desc := describeUsageCleanupFilters(UsageCleanupFilters{ + StartTime: start, + EndTime: end, + RequestType: &requestType, + }) + + require.Contains(t, desc, "request_type=ws_v2") +} + func TestUsageCleanupServiceCreateTaskInvalidCreator(t *testing.T) { repo := &cleanupRepoStub{} cfg := &config.Config{UsageCleanup: config.UsageCleanupConfig{Enabled: true}} diff --git a/backend/internal/service/usage_log.go b/backend/internal/service/usage_log.go index 73748a6d8..c1a95541c 100644 --- a/backend/internal/service/usage_log.go +++ b/backend/internal/service/usage_log.go @@ -1,12 +1,96 @@ package service -import "time" +import ( + "fmt" + "strings" + "time" +) const ( BillingTypeBalance int8 = 0 // 钱包余额 BillingTypeSubscription int8 = 1 // 订阅套餐 ) +type RequestType int16 + +const ( + RequestTypeUnknown RequestType = 0 + RequestTypeSync RequestType = 1 + RequestTypeStream RequestType = 2 + RequestTypeWSV2 RequestType = 3 +) + +func (t RequestType) IsValid() bool { + switch t { + case RequestTypeUnknown, RequestTypeSync, RequestTypeStream, RequestTypeWSV2: + return true + default: + return false + } +} + +func (t RequestType) Normalize() RequestType { + if t.IsValid() { + return t + } + return RequestTypeUnknown +} + +func (t RequestType) String() string { + switch t.Normalize() { + case RequestTypeSync: + return "sync" + case RequestTypeStream: + return "stream" + case RequestTypeWSV2: + return "ws_v2" + default: + 
return "unknown" + } +} + +func RequestTypeFromInt16(v int16) RequestType { + return RequestType(v).Normalize() +} + +func ParseUsageRequestType(value string) (RequestType, error) { + switch strings.ToLower(strings.TrimSpace(value)) { + case "unknown": + return RequestTypeUnknown, nil + case "sync": + return RequestTypeSync, nil + case "stream": + return RequestTypeStream, nil + case "ws_v2": + return RequestTypeWSV2, nil + default: + return RequestTypeUnknown, fmt.Errorf("invalid request_type, allowed values: unknown, sync, stream, ws_v2") + } +} + +func RequestTypeFromLegacy(stream bool, openAIWSMode bool) RequestType { + if openAIWSMode { + return RequestTypeWSV2 + } + if stream { + return RequestTypeStream + } + return RequestTypeSync +} + +func ApplyLegacyRequestFields(requestType RequestType, fallbackStream bool, fallbackOpenAIWSMode bool) (stream bool, openAIWSMode bool) { + switch requestType.Normalize() { + case RequestTypeSync: + return false, false + case RequestTypeStream: + return true, false + case RequestTypeWSV2: + return true, true + default: + return fallbackStream, fallbackOpenAIWSMode + } +} + type UsageLog struct { ID int64 UserID int64 @@ -40,6 +124,7 @@ type UsageLog struct { AccountRateMultiplier *float64 BillingType int8 + RequestType RequestType Stream bool OpenAIWSMode bool DurationMs *int @@ -67,3 +152,22 @@ type UsageLog struct { func (u *UsageLog) TotalTokens() int { return u.InputTokens + u.OutputTokens + u.CacheCreationTokens + u.CacheReadTokens } + +func (u *UsageLog) EffectiveRequestType() RequestType { + if u == nil { + return RequestTypeUnknown + } + if normalized := u.RequestType.Normalize(); normalized != RequestTypeUnknown { + return normalized + } + return RequestTypeFromLegacy(u.Stream, u.OpenAIWSMode) +} + +func (u *UsageLog) SyncRequestTypeAndLegacyFields() { + if u == nil { + return + } + requestType := u.EffectiveRequestType() + u.RequestType = requestType + u.Stream, u.OpenAIWSMode = 
ApplyLegacyRequestFields(requestType, u.Stream, u.OpenAIWSMode) +} diff --git a/backend/internal/service/usage_log_test.go b/backend/internal/service/usage_log_test.go new file mode 100644 index 000000000..280237c20 --- /dev/null +++ b/backend/internal/service/usage_log_test.go @@ -0,0 +1,112 @@ +package service + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseUsageRequestType(t *testing.T) { + t.Parallel() + + type testCase struct { + name string + input string + want RequestType + wantErr bool + } + + cases := []testCase{ + {name: "unknown", input: "unknown", want: RequestTypeUnknown}, + {name: "sync", input: "sync", want: RequestTypeSync}, + {name: "stream", input: "stream", want: RequestTypeStream}, + {name: "ws_v2", input: "ws_v2", want: RequestTypeWSV2}, + {name: "case_insensitive", input: "WS_V2", want: RequestTypeWSV2}, + {name: "trim_spaces", input: " stream ", want: RequestTypeStream}, + {name: "invalid", input: "xxx", wantErr: true}, + } + + for _, tc := range cases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + got, err := ParseUsageRequestType(tc.input) + if tc.wantErr { + require.Error(t, err) + return + } + require.NoError(t, err) + require.Equal(t, tc.want, got) + }) + } +} + +func TestRequestTypeNormalizeAndString(t *testing.T) { + t.Parallel() + + require.Equal(t, RequestTypeUnknown, RequestType(99).Normalize()) + require.Equal(t, "unknown", RequestType(99).String()) + require.Equal(t, "sync", RequestTypeSync.String()) + require.Equal(t, "stream", RequestTypeStream.String()) + require.Equal(t, "ws_v2", RequestTypeWSV2.String()) +} + +func TestRequestTypeFromLegacy(t *testing.T) { + t.Parallel() + + require.Equal(t, RequestTypeWSV2, RequestTypeFromLegacy(false, true)) + require.Equal(t, RequestTypeStream, RequestTypeFromLegacy(true, false)) + require.Equal(t, RequestTypeSync, RequestTypeFromLegacy(false, false)) +} + +func TestApplyLegacyRequestFields(t *testing.T) { + t.Parallel() + + 
stream, ws := ApplyLegacyRequestFields(RequestTypeSync, true, true) + require.False(t, stream) + require.False(t, ws) + + stream, ws = ApplyLegacyRequestFields(RequestTypeStream, false, true) + require.True(t, stream) + require.False(t, ws) + + stream, ws = ApplyLegacyRequestFields(RequestTypeWSV2, false, false) + require.True(t, stream) + require.True(t, ws) + + stream, ws = ApplyLegacyRequestFields(RequestTypeUnknown, true, false) + require.True(t, stream) + require.False(t, ws) +} + +func TestUsageLogSyncRequestTypeAndLegacyFields(t *testing.T) { + t.Parallel() + + log := &UsageLog{RequestType: RequestTypeWSV2, Stream: false, OpenAIWSMode: false} + log.SyncRequestTypeAndLegacyFields() + + require.Equal(t, RequestTypeWSV2, log.RequestType) + require.True(t, log.Stream) + require.True(t, log.OpenAIWSMode) +} + +func TestUsageLogEffectiveRequestTypeFallback(t *testing.T) { + t.Parallel() + + log := &UsageLog{RequestType: RequestTypeUnknown, Stream: true, OpenAIWSMode: true} + require.Equal(t, RequestTypeWSV2, log.EffectiveRequestType()) +} + +func TestUsageLogEffectiveRequestTypeNilReceiver(t *testing.T) { + t.Parallel() + + var log *UsageLog + require.Equal(t, RequestTypeUnknown, log.EffectiveRequestType()) +} + +func TestUsageLogSyncRequestTypeAndLegacyFieldsNilReceiver(t *testing.T) { + t.Parallel() + + var log *UsageLog + log.SyncRequestTypeAndLegacyFields() +} diff --git a/backend/migrations/001_init.sql b/backend/migrations/001_init.sql index 64078c42d..0dae3055f 100644 --- a/backend/migrations/001_init.sql +++ b/backend/migrations/001_init.sql @@ -158,6 +158,7 @@ CREATE TABLE IF NOT EXISTS usage_logs ( actual_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, -- 实际扣除费用 -- 元数据 + request_type SMALLINT NOT NULL DEFAULT 0 CHECK (request_type IN (0, 1, 2, 3)), stream BOOLEAN NOT NULL DEFAULT FALSE, duration_ms INT, @@ -170,3 +171,4 @@ CREATE INDEX IF NOT EXISTS idx_usage_logs_account_id ON usage_logs(account_id); CREATE INDEX IF NOT EXISTS idx_usage_logs_model ON 
usage_logs(model); CREATE INDEX IF NOT EXISTS idx_usage_logs_created_at ON usage_logs(created_at); CREATE INDEX IF NOT EXISTS idx_usage_logs_user_created ON usage_logs(user_id, created_at); +CREATE INDEX IF NOT EXISTS idx_usage_logs_request_type_created_at ON usage_logs(request_type, created_at); diff --git a/backend/migrations/061_add_usage_log_request_type.sql b/backend/migrations/061_add_usage_log_request_type.sql new file mode 100644 index 000000000..68a33d510 --- /dev/null +++ b/backend/migrations/061_add_usage_log_request_type.sql @@ -0,0 +1,29 @@ +-- Add request_type enum for usage_logs while keeping legacy stream/openai_ws_mode compatibility. +ALTER TABLE usage_logs + ADD COLUMN IF NOT EXISTS request_type SMALLINT NOT NULL DEFAULT 0; + +DO $$ +BEGIN + IF NOT EXISTS ( + SELECT 1 + FROM pg_constraint + WHERE conname = 'usage_logs_request_type_check' + ) THEN + ALTER TABLE usage_logs + ADD CONSTRAINT usage_logs_request_type_check + CHECK (request_type IN (0, 1, 2, 3)); + END IF; +END +$$; + +CREATE INDEX IF NOT EXISTS idx_usage_logs_request_type_created_at + ON usage_logs (request_type, created_at); + +-- Backfill from legacy fields. openai_ws_mode has higher priority than stream. 
+UPDATE usage_logs +SET request_type = CASE + WHEN openai_ws_mode = TRUE THEN 3 + WHEN stream = TRUE THEN 2 + ELSE 1 +END +WHERE request_type = 0; diff --git a/frontend/src/api/admin/dashboard.ts b/frontend/src/api/admin/dashboard.ts index ae48bec2f..a5113dd1f 100644 --- a/frontend/src/api/admin/dashboard.ts +++ b/frontend/src/api/admin/dashboard.ts @@ -9,7 +9,8 @@ import type { TrendDataPoint, ModelStat, ApiKeyUsageTrendPoint, - UserUsageTrendPoint + UserUsageTrendPoint, + UsageRequestType } from '@/types' /** @@ -49,6 +50,7 @@ export interface TrendParams { model?: string account_id?: number group_id?: number + request_type?: UsageRequestType stream?: boolean billing_type?: number | null } @@ -78,6 +80,7 @@ export interface ModelStatsParams { model?: string account_id?: number group_id?: number + request_type?: UsageRequestType stream?: boolean billing_type?: number | null } diff --git a/frontend/src/api/admin/usage.ts b/frontend/src/api/admin/usage.ts index 94f7b57b3..66c844107 100644 --- a/frontend/src/api/admin/usage.ts +++ b/frontend/src/api/admin/usage.ts @@ -4,7 +4,7 @@ */ import { apiClient } from '../client' -import type { AdminUsageLog, UsageQueryParams, PaginatedResponse } from '@/types' +import type { AdminUsageLog, UsageQueryParams, PaginatedResponse, UsageRequestType } from '@/types' // ==================== Types ==================== @@ -39,6 +39,7 @@ export interface UsageCleanupFilters { account_id?: number group_id?: number model?: string | null + request_type?: UsageRequestType | null stream?: boolean | null billing_type?: number | null } @@ -66,6 +67,7 @@ export interface CreateUsageCleanupTaskRequest { account_id?: number group_id?: number model?: string | null + request_type?: UsageRequestType | null stream?: boolean | null billing_type?: number | null timezone?: string @@ -104,6 +106,7 @@ export async function getStats(params: { account_id?: number group_id?: number model?: string + request_type?: UsageRequestType stream?: boolean period?: 
string start_date?: string diff --git a/frontend/src/components/admin/usage/UsageCleanupDialog.vue b/frontend/src/components/admin/usage/UsageCleanupDialog.vue index d5e81e727..3218be30c 100644 --- a/frontend/src/components/admin/usage/UsageCleanupDialog.vue +++ b/frontend/src/components/admin/usage/UsageCleanupDialog.vue @@ -125,6 +125,7 @@ import Pagination from '@/components/common/Pagination.vue' import UsageFilters from '@/components/admin/usage/UsageFilters.vue' import { adminUsageAPI } from '@/api/admin/usage' import type { AdminUsageQueryParams, UsageCleanupTask, CreateUsageCleanupTaskRequest } from '@/api/admin/usage' +import { requestTypeToLegacyStream } from '@/utils/usageRequestType' interface Props { show: boolean @@ -310,7 +311,13 @@ const buildPayload = (): CreateUsageCleanupTaskRequest | null => { if (localFilters.value.model) { payload.model = localFilters.value.model } - if (localFilters.value.stream !== null && localFilters.value.stream !== undefined) { + if (localFilters.value.request_type) { + payload.request_type = localFilters.value.request_type + const legacyStream = requestTypeToLegacyStream(localFilters.value.request_type) + if (legacyStream !== null && legacyStream !== undefined) { + payload.stream = legacyStream + } + } else if (localFilters.value.stream !== null && localFilters.value.stream !== undefined) { payload.stream = localFilters.value.stream } if (localFilters.value.billing_type !== null && localFilters.value.billing_type !== undefined) { diff --git a/frontend/src/components/admin/usage/UsageFilters.vue b/frontend/src/components/admin/usage/UsageFilters.vue index 9bdf69216..2f4b8245c 100644 --- a/frontend/src/components/admin/usage/UsageFilters.vue +++ b/frontend/src/components/admin/usage/UsageFilters.vue @@ -121,10 +121,10 @@
- +
-
@@ -232,10 +232,11 @@ let accountSearchTimeout: ReturnType | null = null const modelOptions = ref([{ value: null, label: t('admin.usage.allModels') }]) const groupOptions = ref([{ value: null, label: t('admin.usage.allGroups') }]) -const streamTypeOptions = ref([ +const requestTypeOptions = ref([ { value: null, label: t('admin.usage.allTypes') }, - { value: true, label: t('usage.stream') }, - { value: false, label: t('usage.sync') } + { value: 'ws_v2', label: t('usage.ws') }, + { value: 'stream', label: t('usage.stream') }, + { value: 'sync', label: t('usage.sync') } ]) const billingTypeOptions = ref([ diff --git a/frontend/src/components/admin/usage/UsageTable.vue b/frontend/src/components/admin/usage/UsageTable.vue index 4ef5e1a79..828683a3c 100644 --- a/frontend/src/components/admin/usage/UsageTable.vue +++ b/frontend/src/components/admin/usage/UsageTable.vue @@ -271,6 +271,7 @@ import { ref, computed } from 'vue' import { useI18n } from 'vue-i18n' import { formatDateTime, formatReasoningEffort } from '@/utils/format' +import { resolveUsageRequestType } from '@/utils/usageRequestType' import DataTable from '@/components/common/DataTable.vue' import EmptyState from '@/components/common/EmptyState.vue' import Icon from '@/components/icons/Icon.vue' @@ -307,15 +308,19 @@ const cols = computed(() => [ ]) const getRequestTypeLabel = (row: AdminUsageLog): string => { - if (row.openai_ws_mode) return t('usage.ws') - return row.stream ? t('usage.stream') : t('usage.sync') + const requestType = resolveUsageRequestType(row) + if (requestType === 'ws_v2') return t('usage.ws') + if (requestType === 'stream') return t('usage.stream') + if (requestType === 'sync') return t('usage.sync') + return t('usage.unknown') } const getRequestTypeBadgeClass = (row: AdminUsageLog): string => { - if (row.openai_ws_mode) return 'bg-violet-100 text-violet-800 dark:bg-violet-900 dark:text-violet-200' - return row.stream - ? 
'bg-blue-100 text-blue-800 dark:bg-blue-900 dark:text-blue-200' - : 'bg-gray-100 text-gray-800 dark:bg-gray-700 dark:text-gray-200' + const requestType = resolveUsageRequestType(row) + if (requestType === 'ws_v2') return 'bg-violet-100 text-violet-800 dark:bg-violet-900 dark:text-violet-200' + if (requestType === 'stream') return 'bg-blue-100 text-blue-800 dark:bg-blue-900 dark:text-blue-200' + if (requestType === 'sync') return 'bg-gray-100 text-gray-800 dark:bg-gray-700 dark:text-gray-200' + return 'bg-amber-100 text-amber-800 dark:bg-amber-900 dark:text-amber-200' } const formatCacheTokens = (tokens: number): string => { diff --git a/frontend/src/i18n/locales/en.ts b/frontend/src/i18n/locales/en.ts index e5a0233e6..ed39dd4bc 100644 --- a/frontend/src/i18n/locales/en.ts +++ b/frontend/src/i18n/locales/en.ts @@ -616,6 +616,7 @@ export default { ws: 'WS', stream: 'Stream', sync: 'Sync', + unknown: 'Unknown', in: 'In', out: 'Out', cacheRead: 'Read', diff --git a/frontend/src/i18n/locales/zh.ts b/frontend/src/i18n/locales/zh.ts index 951944e44..607bc5b0d 100644 --- a/frontend/src/i18n/locales/zh.ts +++ b/frontend/src/i18n/locales/zh.ts @@ -622,6 +622,7 @@ export default { ws: 'WS', stream: '流式', sync: '同步', + unknown: '未知', in: '输入', out: '输出', cacheRead: '读取', diff --git a/frontend/src/types/index.ts b/frontend/src/types/index.ts index 7db66f0c9..e5e01c673 100644 --- a/frontend/src/types/index.ts +++ b/frontend/src/types/index.ts @@ -859,6 +859,7 @@ export interface AdminDataImportResult { // ==================== Usage & Redeem Types ==================== export type RedeemCodeType = 'balance' | 'concurrency' | 'subscription' | 'invitation' +export type UsageRequestType = 'unknown' | 'sync' | 'stream' | 'ws_v2' export interface UsageLog { id: number @@ -888,6 +889,7 @@ export interface UsageLog { rate_multiplier: number billing_type: number + request_type?: UsageRequestType stream: boolean openai_ws_mode?: boolean duration_ms: number @@ -935,6 +937,7 @@ export 
interface UsageCleanupFilters { account_id?: number group_id?: number model?: string | null + request_type?: UsageRequestType | null stream?: boolean | null billing_type?: number | null } @@ -1179,6 +1182,7 @@ export interface UsageQueryParams { account_id?: number group_id?: number model?: string + request_type?: UsageRequestType stream?: boolean billing_type?: number | null start_date?: string diff --git a/frontend/src/utils/usageRequestType.ts b/frontend/src/utils/usageRequestType.ts new file mode 100644 index 000000000..bfdafb075 --- /dev/null +++ b/frontend/src/utils/usageRequestType.ts @@ -0,0 +1,33 @@ +import type { UsageRequestType } from '@/types' + +export interface UsageRequestTypeLike { + request_type?: string | null + stream?: boolean | null + openai_ws_mode?: boolean | null +} + +const VALID_REQUEST_TYPES = new Set(['unknown', 'sync', 'stream', 'ws_v2']) + +export const isUsageRequestType = (value: unknown): value is UsageRequestType => { + return typeof value === 'string' && VALID_REQUEST_TYPES.has(value as UsageRequestType) +} + +export const resolveUsageRequestType = (value: UsageRequestTypeLike): UsageRequestType => { + if (isUsageRequestType(value.request_type)) { + return value.request_type + } + if (value.openai_ws_mode) { + return 'ws_v2' + } + return value.stream ? 
'stream' : 'sync' +} + +export const requestTypeToLegacyStream = (requestType?: UsageRequestType | null): boolean | null | undefined => { + if (!requestType || requestType === 'unknown') { + return null + } + if (requestType === 'sync') { + return false + } + return true +} diff --git a/frontend/src/views/admin/UsageView.vue b/frontend/src/views/admin/UsageView.vue index 9c4544ecd..61e5f8881 100644 --- a/frontend/src/views/admin/UsageView.vue +++ b/frontend/src/views/admin/UsageView.vue @@ -38,6 +38,7 @@ import { useI18n } from 'vue-i18n' import { saveAs } from 'file-saver' import { useAppStore } from '@/stores/app'; import { adminAPI } from '@/api/admin'; import { adminUsageAPI } from '@/api/admin/usage' import { formatReasoningEffort } from '@/utils/format' +import { resolveUsageRequestType, requestTypeToLegacyStream } from '@/utils/usageRequestType' import AppLayout from '@/components/layout/AppLayout.vue'; import Pagination from '@/components/common/Pagination.vue'; import Select from '@/components/common/Select.vue' import UsageStatsCards from '@/components/admin/usage/UsageStatsCards.vue'; import UsageFilters from '@/components/admin/usage/UsageFilters.vue' import UsageTable from '@/components/admin/usage/UsageTable.vue'; import UsageExportProgress from '@/components/admin/usage/UsageExportProgress.vue' @@ -63,35 +64,51 @@ const formatLD = (d: Date) => { } const now = new Date(); const weekAgo = new Date(); weekAgo.setDate(weekAgo.getDate() - 6) const startDate = ref(formatLD(weekAgo)); const endDate = ref(formatLD(now)) -const filters = ref({ user_id: undefined, model: undefined, group_id: undefined, billing_type: null, start_date: startDate.value, end_date: endDate.value }) +const filters = ref({ user_id: undefined, model: undefined, group_id: undefined, request_type: undefined, billing_type: null, start_date: startDate.value, end_date: endDate.value }) const pagination = reactive({ page: 1, page_size: 20, total: 0 }) const loadLogs = async () => { 
abortController?.abort(); const c = new AbortController(); abortController = c; loading.value = true try { - const res = await adminAPI.usage.list({ page: pagination.page, page_size: pagination.page_size, ...filters.value }, { signal: c.signal }) + const requestType = filters.value.request_type + const legacyStream = requestType ? requestTypeToLegacyStream(requestType) : filters.value.stream + const res = await adminAPI.usage.list({ page: pagination.page, page_size: pagination.page_size, ...filters.value, stream: legacyStream === null ? undefined : legacyStream }, { signal: c.signal }) if(!c.signal.aborted) { usageLogs.value = res.items; pagination.total = res.total } } catch (error: any) { if(error?.name !== 'AbortError') console.error('Failed to load usage logs:', error) } finally { if(abortController === c) loading.value = false } } -const loadStats = async () => { try { const s = await adminAPI.usage.getStats(filters.value); usageStats.value = s } catch (error) { console.error('Failed to load usage stats:', error) } } +const loadStats = async () => { + try { + const requestType = filters.value.request_type + const legacyStream = requestType ? requestTypeToLegacyStream(requestType) : filters.value.stream + const s = await adminAPI.usage.getStats({ ...filters.value, stream: legacyStream === null ? 
undefined : legacyStream }) + usageStats.value = s + } catch (error) { + console.error('Failed to load usage stats:', error) + } +} const loadChartData = async () => { chartsLoading.value = true try { - const params = { start_date: filters.value.start_date || startDate.value, end_date: filters.value.end_date || endDate.value, granularity: granularity.value, user_id: filters.value.user_id, model: filters.value.model, api_key_id: filters.value.api_key_id, account_id: filters.value.account_id, group_id: filters.value.group_id, stream: filters.value.stream, billing_type: filters.value.billing_type } - const [trendRes, modelRes] = await Promise.all([adminAPI.dashboard.getUsageTrend(params), adminAPI.dashboard.getModelStats({ start_date: params.start_date, end_date: params.end_date, user_id: params.user_id, model: params.model, api_key_id: params.api_key_id, account_id: params.account_id, group_id: params.group_id, stream: params.stream, billing_type: params.billing_type })]) + const requestType = filters.value.request_type + const legacyStream = requestType ? requestTypeToLegacyStream(requestType) : filters.value.stream + const params = { start_date: filters.value.start_date || startDate.value, end_date: filters.value.end_date || endDate.value, granularity: granularity.value, user_id: filters.value.user_id, model: filters.value.model, api_key_id: filters.value.api_key_id, account_id: filters.value.account_id, group_id: filters.value.group_id, request_type: requestType, stream: legacyStream === null ? 
undefined : legacyStream, billing_type: filters.value.billing_type } + const [trendRes, modelRes] = await Promise.all([adminAPI.dashboard.getUsageTrend(params), adminAPI.dashboard.getModelStats({ start_date: params.start_date, end_date: params.end_date, user_id: params.user_id, model: params.model, api_key_id: params.api_key_id, account_id: params.account_id, group_id: params.group_id, request_type: params.request_type, stream: params.stream, billing_type: params.billing_type })]) trendData.value = trendRes.trend || []; modelStats.value = modelRes.models || [] } catch (error) { console.error('Failed to load chart data:', error) } finally { chartsLoading.value = false } } const applyFilters = () => { pagination.page = 1; loadLogs(); loadStats(); loadChartData() } const refreshData = () => { loadLogs(); loadStats(); loadChartData() } -const resetFilters = () => { startDate.value = formatLD(weekAgo); endDate.value = formatLD(now); filters.value = { start_date: startDate.value, end_date: endDate.value, billing_type: null }; granularity.value = 'day'; applyFilters() } +const resetFilters = () => { startDate.value = formatLD(weekAgo); endDate.value = formatLD(now); filters.value = { start_date: startDate.value, end_date: endDate.value, request_type: undefined, billing_type: null }; granularity.value = 'day'; applyFilters() } const handlePageChange = (p: number) => { pagination.page = p; loadLogs() } const handlePageSizeChange = (s: number) => { pagination.page_size = s; pagination.page = 1; loadLogs() } const cancelExport = () => exportAbortController?.abort() const openCleanupDialog = () => { cleanupDialogVisible.value = true } const getRequestTypeLabel = (log: AdminUsageLog): string => { - if (log.openai_ws_mode) return t('usage.ws') - return log.stream ? 
t('usage.stream') : t('usage.sync') + const requestType = resolveUsageRequestType(log) + if (requestType === 'ws_v2') return t('usage.ws') + if (requestType === 'stream') return t('usage.stream') + if (requestType === 'sync') return t('usage.sync') + return t('usage.unknown') } const exportToExcel = async () => { @@ -114,7 +131,9 @@ const exportToExcel = async () => { ] const ws = XLSX.utils.aoa_to_sheet([headers]) while (true) { - const res = await adminUsageAPI.list({ page: p, page_size: 100, ...filters.value }, { signal: c.signal }) + const requestType = filters.value.request_type + const legacyStream = requestType ? requestTypeToLegacyStream(requestType) : filters.value.stream + const res = await adminUsageAPI.list({ page: p, page_size: 100, ...filters.value, stream: legacyStream === null ? undefined : legacyStream }, { signal: c.signal }) if (c.signal.aborted) break; if (p === 1) { total = res.total; exportProgress.total = total } const rows = (res.items || []).map((log: AdminUsageLog) => [ log.created_at, log.user?.email || '', log.api_key?.name || '', log.account?.name || '', log.model, diff --git a/frontend/src/views/user/UsageView.vue b/frontend/src/views/user/UsageView.vue index f4046918c..ff8753254 100644 --- a/frontend/src/views/user/UsageView.vue +++ b/frontend/src/views/user/UsageView.vue @@ -469,12 +469,13 @@ import TablePageLayout from '@/components/layout/TablePageLayout.vue' import DataTable from '@/components/common/DataTable.vue' import Pagination from '@/components/common/Pagination.vue' import EmptyState from '@/components/common/EmptyState.vue' - import Select from '@/components/common/Select.vue' - import DateRangePicker from '@/components/common/DateRangePicker.vue' - import Icon from '@/components/icons/Icon.vue' - import type { UsageLog, ApiKey, UsageQueryParams, UsageStatsResponse } from '@/types' - import type { Column } from '@/components/common/types' - import { formatDateTime, formatReasoningEffort } from '@/utils/format' +import 
Select from '@/components/common/Select.vue' +import DateRangePicker from '@/components/common/DateRangePicker.vue' +import Icon from '@/components/icons/Icon.vue' +import type { UsageLog, ApiKey, UsageQueryParams, UsageStatsResponse } from '@/types' +import type { Column } from '@/components/common/types' +import { formatDateTime, formatReasoningEffort } from '@/utils/format' +import { resolveUsageRequestType } from '@/utils/usageRequestType' const { t } = useI18n() const appStore = useAppStore() @@ -574,15 +575,27 @@ const formatUserAgent = (ua: string): string => { } const getRequestTypeLabel = (log: UsageLog): string => { - if (log.openai_ws_mode) return t('usage.ws') - return log.stream ? t('usage.stream') : t('usage.sync') + const requestType = resolveUsageRequestType(log) + if (requestType === 'ws_v2') return t('usage.ws') + if (requestType === 'stream') return t('usage.stream') + if (requestType === 'sync') return t('usage.sync') + return t('usage.unknown') } const getRequestTypeBadgeClass = (log: UsageLog): string => { - if (log.openai_ws_mode) return 'bg-violet-100 text-violet-800 dark:bg-violet-900 dark:text-violet-200' - return log.stream - ? 
'bg-blue-100 text-blue-800 dark:bg-blue-900 dark:text-blue-200' - : 'bg-gray-100 text-gray-800 dark:bg-gray-700 dark:text-gray-200' + const requestType = resolveUsageRequestType(log) + if (requestType === 'ws_v2') return 'bg-violet-100 text-violet-800 dark:bg-violet-900 dark:text-violet-200' + if (requestType === 'stream') return 'bg-blue-100 text-blue-800 dark:bg-blue-900 dark:text-blue-200' + if (requestType === 'sync') return 'bg-gray-100 text-gray-800 dark:bg-gray-700 dark:text-gray-200' + return 'bg-amber-100 text-amber-800 dark:bg-amber-900 dark:text-amber-200' +} + +const getRequestTypeExportText = (log: UsageLog): string => { + const requestType = resolveUsageRequestType(log) + if (requestType === 'ws_v2') return 'WS' + if (requestType === 'stream') return 'Stream' + if (requestType === 'sync') return 'Sync' + return 'Unknown' } const formatTokens = (value: number): string => { @@ -776,7 +789,7 @@ const exportToCSV = async () => { log.api_key?.name || '', log.model, formatReasoningEffort(log.reasoning_effort), - log.openai_ws_mode ? 'WS' : log.stream ? 
'Stream' : 'Sync', + getRequestTypeExportText(log), log.input_tokens, log.output_tokens, log.cache_read_tokens, From 7606f0e21ee8c169b73f8678374a80f6a1e56315 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 14:26:34 +0800 Subject: [PATCH 020/120] =?UTF-8?q?fix(repository):=20=E4=BF=AE=E5=A4=8Dre?= =?UTF-8?q?quest=5Ftype=E5=85=BC=E5=AE=B9=E8=BF=87=E6=BB=A4=E5=B9=B6?= =?UTF-8?q?=E5=9B=9E=E9=80=80=E4=B8=8D=E5=8F=AF=E5=8F=98=E8=BF=81=E7=A7=BB?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../handler/openai_gateway_handler.go | 51 ++++++- .../internal/repository/usage_cleanup_repo.go | 7 +- .../repository/usage_cleanup_repo_test.go | 17 ++- backend/internal/repository/usage_log_repo.go | 76 ++++++---- .../usage_log_repo_request_type_test.go | 52 ++++++- .../service/openai_ws_fallback_test.go | 42 ++++++ .../internal/service/openai_ws_forwarder.go | 133 +++++++++++++++++- backend/internal/service/openai_ws_pool.go | 17 ++- backend/migrations/001_init.sql | 2 - 9 files changed, 343 insertions(+), 54 deletions(-) diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 2fee413c3..e7583c04d 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -528,12 +528,22 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { return } reqLog.Info("openai.websocket_ingress_started") + clientIP := ip.GetClientIP(c) + userAgent := strings.TrimSpace(c.GetHeader("User-Agent")) wsConn, err := coderws.Accept(c.Writer, c.Request, &coderws.AcceptOptions{ CompressionMode: coderws.CompressionContextTakeover, }) if err != nil { - reqLog.Warn("openai.websocket_accept_failed", zap.Error(err)) + reqLog.Warn("openai.websocket_accept_failed", + zap.Error(err), + zap.String("client_ip", clientIP), + zap.String("request_user_agent", userAgent), + zap.String("upgrade_header", 
strings.TrimSpace(c.GetHeader("Upgrade"))), + zap.String("connection_header", strings.TrimSpace(c.GetHeader("Connection"))), + zap.String("sec_websocket_version", strings.TrimSpace(c.GetHeader("Sec-WebSocket-Version"))), + zap.Bool("has_sec_websocket_key", strings.TrimSpace(c.GetHeader("Sec-WebSocket-Key")) != ""), + ) return } defer func() { @@ -546,7 +556,14 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { msgType, firstMessage, err := wsConn.Read(readCtx) cancel() if err != nil { - reqLog.Warn("openai.websocket_read_first_message_failed", zap.Error(err)) + closeStatus, closeReason := summarizeWSCloseErrorForLog(err) + reqLog.Warn("openai.websocket_read_first_message_failed", + zap.Error(err), + zap.String("client_ip", clientIP), + zap.String("close_status", closeStatus), + zap.String("close_reason", closeReason), + zap.Duration("read_timeout", 30*time.Second), + ) closeOpenAIClientWS(wsConn, coderws.StatusPolicyViolation, "missing first response.create message") return } @@ -667,8 +684,6 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { zap.Int("candidate_count", scheduleDecision.CandidateCount), ) - userAgent := c.GetHeader("User-Agent") - clientIP := ip.GetClientIP(c) hooks := &service.OpenAIWSIngressHooks{ BeforeTurn: func(turn int) error { if turn == 1 { @@ -730,7 +745,13 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { if err := h.gatewayService.ProxyResponsesWebSocketFromClient(ctx, c, wsConn, account, token, firstMessage, hooks); err != nil { h.gatewayService.ReportOpenAIAccountScheduleResult(account.ID, false, nil) - reqLog.Warn("openai.websocket_proxy_failed", zap.Int64("account_id", account.ID), zap.Error(err)) + closeStatus, closeReason := summarizeWSCloseErrorForLog(err) + reqLog.Warn("openai.websocket_proxy_failed", + zap.Int64("account_id", account.ID), + zap.Error(err), + zap.String("close_status", closeStatus), + zap.String("close_reason", closeReason), + ) var closeErr 
*service.OpenAIWSClientCloseError if errors.As(err, &closeErr) { closeOpenAIClientWS(wsConn, closeErr.StatusCode(), closeErr.Reason()) @@ -969,3 +990,23 @@ func closeOpenAIClientWS(conn *coderws.Conn, status coderws.StatusCode, reason s _ = conn.Close(status, reason) _ = conn.CloseNow() } + +func summarizeWSCloseErrorForLog(err error) (string, string) { + if err == nil { + return "-", "-" + } + statusCode := coderws.CloseStatus(err) + if statusCode == -1 { + return "-", "-" + } + closeStatus := fmt.Sprintf("%d(%s)", int(statusCode), statusCode.String()) + closeReason := "-" + var closeErr coderws.CloseError + if errors.As(err, &closeErr) { + reason := strings.TrimSpace(closeErr.Reason) + if reason != "" { + closeReason = reason + } + } + return closeStatus, closeReason +} diff --git a/backend/internal/repository/usage_cleanup_repo.go b/backend/internal/repository/usage_cleanup_repo.go index 30e70569d..1a25696e4 100644 --- a/backend/internal/repository/usage_cleanup_repo.go +++ b/backend/internal/repository/usage_cleanup_repo.go @@ -363,9 +363,10 @@ func buildUsageCleanupWhere(filters service.UsageCleanupFilters) (string, []any) } } if filters.RequestType != nil { - conditions = append(conditions, fmt.Sprintf("request_type = $%d", idx)) - args = append(args, *filters.RequestType) - idx++ + condition, conditionArgs := buildRequestTypeFilterCondition(idx, *filters.RequestType) + conditions = append(conditions, condition) + args = append(args, conditionArgs...) 
+ idx += len(conditionArgs) } else if filters.Stream != nil { conditions = append(conditions, fmt.Sprintf("stream = $%d", idx)) args = append(args, *filters.Stream) diff --git a/backend/internal/repository/usage_cleanup_repo_test.go b/backend/internal/repository/usage_cleanup_repo_test.go index 788f2fd31..1ac7cca56 100644 --- a/backend/internal/repository/usage_cleanup_repo_test.go +++ b/backend/internal/repository/usage_cleanup_repo_test.go @@ -479,7 +479,22 @@ func TestBuildUsageCleanupWhereRequestTypePriority(t *testing.T) { Stream: &stream, }) - require.Equal(t, "created_at >= $1 AND created_at <= $2 AND request_type = $3", where) + require.Equal(t, "created_at >= $1 AND created_at <= $2 AND (request_type = $3 OR (request_type = 0 AND openai_ws_mode = TRUE))", where) + require.Equal(t, []any{start, end, requestType}, args) +} + +func TestBuildUsageCleanupWhereRequestTypeLegacyFallback(t *testing.T) { + start := time.Date(2024, 1, 1, 0, 0, 0, 0, time.UTC) + end := start.Add(24 * time.Hour) + requestType := int16(service.RequestTypeStream) + + where, args := buildUsageCleanupWhere(service.UsageCleanupFilters{ + StartTime: start, + EndTime: end, + RequestType: &requestType, + }) + + require.Equal(t, "created_at >= $1 AND created_at <= $2 AND (request_type = $3 OR (request_type = 0 AND stream = TRUE AND openai_ws_mode = FALSE))", where) require.Equal(t, []any{start, end, requestType}, args) } diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go index 18a782ab9..2b4e061a7 100644 --- a/backend/internal/repository/usage_log_repo.go +++ b/backend/internal/repository/usage_log_repo.go @@ -1405,13 +1405,7 @@ func (r *usageLogRepository) ListWithFilters(ctx context.Context, params paginat conditions = append(conditions, fmt.Sprintf("model = $%d", len(args)+1)) args = append(args, filters.Model) } - if filters.RequestType != nil { - conditions = append(conditions, fmt.Sprintf("request_type = $%d", len(args)+1)) - args = 
append(args, *filters.RequestType) - } else if filters.Stream != nil { - conditions = append(conditions, fmt.Sprintf("stream = $%d", len(args)+1)) - args = append(args, *filters.Stream) - } + conditions, args = appendRequestTypeOrStreamWhereCondition(conditions, args, filters.RequestType, filters.Stream) if filters.BillingType != nil { conditions = append(conditions, fmt.Sprintf("billing_type = $%d", len(args)+1)) args = append(args, int16(*filters.BillingType)) @@ -1645,13 +1639,7 @@ func (r *usageLogRepository) GetUsageTrendWithFilters(ctx context.Context, start query += fmt.Sprintf(" AND model = $%d", len(args)+1) args = append(args, model) } - if requestType != nil { - query += fmt.Sprintf(" AND request_type = $%d", len(args)+1) - args = append(args, *requestType) - } else if stream != nil { - query += fmt.Sprintf(" AND stream = $%d", len(args)+1) - args = append(args, *stream) - } + query, args = appendRequestTypeOrStreamQueryFilter(query, args, requestType, stream) if billingType != nil { query += fmt.Sprintf(" AND billing_type = $%d", len(args)+1) args = append(args, int16(*billingType)) @@ -1716,13 +1704,7 @@ func (r *usageLogRepository) GetModelStatsWithFilters(ctx context.Context, start query += fmt.Sprintf(" AND group_id = $%d", len(args)+1) args = append(args, groupID) } - if requestType != nil { - query += fmt.Sprintf(" AND request_type = $%d", len(args)+1) - args = append(args, *requestType) - } else if stream != nil { - query += fmt.Sprintf(" AND stream = $%d", len(args)+1) - args = append(args, *stream) - } + query, args = appendRequestTypeOrStreamQueryFilter(query, args, requestType, stream) if billingType != nil { query += fmt.Sprintf(" AND billing_type = $%d", len(args)+1) args = append(args, int16(*billingType)) @@ -1809,13 +1791,7 @@ func (r *usageLogRepository) GetStatsWithFilters(ctx context.Context, filters Us conditions = append(conditions, fmt.Sprintf("model = $%d", len(args)+1)) args = append(args, filters.Model) } - if 
filters.RequestType != nil { - conditions = append(conditions, fmt.Sprintf("request_type = $%d", len(args)+1)) - args = append(args, *filters.RequestType) - } else if filters.Stream != nil { - conditions = append(conditions, fmt.Sprintf("stream = $%d", len(args)+1)) - args = append(args, *filters.Stream) - } + conditions, args = appendRequestTypeOrStreamWhereCondition(conditions, args, filters.RequestType, filters.Stream) if filters.BillingType != nil { conditions = append(conditions, fmt.Sprintf("billing_type = $%d", len(args)+1)) args = append(args, int16(*filters.BillingType)) @@ -2465,6 +2441,50 @@ func buildWhere(conditions []string) string { return "WHERE " + strings.Join(conditions, " AND ") } +func appendRequestTypeOrStreamWhereCondition(conditions []string, args []any, requestType *int16, stream *bool) ([]string, []any) { + if requestType != nil { + condition, conditionArgs := buildRequestTypeFilterCondition(len(args)+1, *requestType) + conditions = append(conditions, condition) + args = append(args, conditionArgs...) + return conditions, args + } + if stream != nil { + conditions = append(conditions, fmt.Sprintf("stream = $%d", len(args)+1)) + args = append(args, *stream) + } + return conditions, args +} + +func appendRequestTypeOrStreamQueryFilter(query string, args []any, requestType *int16, stream *bool) (string, []any) { + if requestType != nil { + condition, conditionArgs := buildRequestTypeFilterCondition(len(args)+1, *requestType) + query += " AND " + condition + args = append(args, conditionArgs...) 
+ return query, args + } + if stream != nil { + query += fmt.Sprintf(" AND stream = $%d", len(args)+1) + args = append(args, *stream) + } + return query, args +} + +// buildRequestTypeFilterCondition 在 request_type 过滤时兼容 legacy 字段,避免历史数据漏查。 +func buildRequestTypeFilterCondition(startArgIndex int, requestType int16) (string, []any) { + normalized := service.RequestTypeFromInt16(requestType) + requestTypeArg := int16(normalized) + switch normalized { + case service.RequestTypeSync: + return fmt.Sprintf("(request_type = $%d OR (request_type = %d AND stream = FALSE AND openai_ws_mode = FALSE))", startArgIndex, int16(service.RequestTypeUnknown)), []any{requestTypeArg} + case service.RequestTypeStream: + return fmt.Sprintf("(request_type = $%d OR (request_type = %d AND stream = TRUE AND openai_ws_mode = FALSE))", startArgIndex, int16(service.RequestTypeUnknown)), []any{requestTypeArg} + case service.RequestTypeWSV2: + return fmt.Sprintf("(request_type = $%d OR (request_type = %d AND openai_ws_mode = TRUE))", startArgIndex, int16(service.RequestTypeUnknown)), []any{requestTypeArg} + default: + return fmt.Sprintf("request_type = $%d", startArgIndex), []any{requestTypeArg} + } +} + func nullInt64(v *int64) sql.NullInt64 { if v == nil { return sql.NullInt64{} diff --git a/backend/internal/repository/usage_log_repo_request_type_test.go b/backend/internal/repository/usage_log_repo_request_type_test.go index 33082c046..95cf2a2d7 100644 --- a/backend/internal/repository/usage_log_repo_request_type_test.go +++ b/backend/internal/repository/usage_log_repo_request_type_test.go @@ -98,10 +98,10 @@ func TestUsageLogRepositoryListWithFiltersRequestTypePriority(t *testing.T) { Stream: &stream, } - mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_logs WHERE request_type = \\$1"). + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM usage_logs WHERE \\(request_type = \\$1 OR \\(request_type = 0 AND openai_ws_mode = TRUE\\)\\)"). WithArgs(requestType). 
WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(int64(0))) - mock.ExpectQuery("SELECT .* FROM usage_logs WHERE request_type = \\$1 ORDER BY id DESC LIMIT \\$2 OFFSET \\$3"). + mock.ExpectQuery("SELECT .* FROM usage_logs WHERE \\(request_type = \\$1 OR \\(request_type = 0 AND openai_ws_mode = TRUE\\)\\) ORDER BY id DESC LIMIT \\$2 OFFSET \\$3"). WithArgs(requestType, 20, 0). WillReturnRows(sqlmock.NewRows([]string{"id"})) @@ -122,7 +122,7 @@ func TestUsageLogRepositoryGetUsageTrendWithFiltersRequestTypePriority(t *testin requestType := int16(service.RequestTypeStream) stream := true - mock.ExpectQuery("AND request_type = \\$3"). + mock.ExpectQuery("AND \\(request_type = \\$3 OR \\(request_type = 0 AND stream = TRUE AND openai_ws_mode = FALSE\\)\\)"). WithArgs(start, end, requestType). WillReturnRows(sqlmock.NewRows([]string{"date", "requests", "input_tokens", "output_tokens", "cache_tokens", "total_tokens", "cost", "actual_cost"})) @@ -141,7 +141,7 @@ func TestUsageLogRepositoryGetModelStatsWithFiltersRequestTypePriority(t *testin requestType := int16(service.RequestTypeWSV2) stream := false - mock.ExpectQuery("AND request_type = \\$3"). + mock.ExpectQuery("AND \\(request_type = \\$3 OR \\(request_type = 0 AND openai_ws_mode = TRUE\\)\\)"). WithArgs(start, end, requestType). WillReturnRows(sqlmock.NewRows([]string{"model", "requests", "input_tokens", "output_tokens", "total_tokens", "cost", "actual_cost"})) @@ -162,7 +162,7 @@ func TestUsageLogRepositoryGetStatsWithFiltersRequestTypePriority(t *testing.T) Stream: &stream, } - mock.ExpectQuery("FROM usage_logs\\s+WHERE request_type = \\$1"). + mock.ExpectQuery("FROM usage_logs\\s+WHERE \\(request_type = \\$1 OR \\(request_type = 0 AND stream = FALSE AND openai_ws_mode = FALSE\\)\\)"). WithArgs(requestType). 
WillReturnRows(sqlmock.NewRows([]string{ "total_requests", @@ -182,6 +182,48 @@ func TestUsageLogRepositoryGetStatsWithFiltersRequestTypePriority(t *testing.T) require.NoError(t, mock.ExpectationsWereMet()) } +func TestBuildRequestTypeFilterConditionLegacyFallback(t *testing.T) { + tests := []struct { + name string + request int16 + wantWhere string + wantArg int16 + }{ + { + name: "sync_with_legacy_fallback", + request: int16(service.RequestTypeSync), + wantWhere: "(request_type = $3 OR (request_type = 0 AND stream = FALSE AND openai_ws_mode = FALSE))", + wantArg: int16(service.RequestTypeSync), + }, + { + name: "stream_with_legacy_fallback", + request: int16(service.RequestTypeStream), + wantWhere: "(request_type = $3 OR (request_type = 0 AND stream = TRUE AND openai_ws_mode = FALSE))", + wantArg: int16(service.RequestTypeStream), + }, + { + name: "ws_v2_with_legacy_fallback", + request: int16(service.RequestTypeWSV2), + wantWhere: "(request_type = $3 OR (request_type = 0 AND openai_ws_mode = TRUE))", + wantArg: int16(service.RequestTypeWSV2), + }, + { + name: "invalid_request_type_normalized_to_unknown", + request: int16(99), + wantWhere: "request_type = $3", + wantArg: int16(service.RequestTypeUnknown), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + where, args := buildRequestTypeFilterCondition(3, tt.request) + require.Equal(t, tt.wantWhere, where) + require.Equal(t, []any{tt.wantArg}, args) + }) + } +} + type usageLogScannerStub struct { values []any } diff --git a/backend/internal/service/openai_ws_fallback_test.go b/backend/internal/service/openai_ws_fallback_test.go index ea20f098d..ba1def78a 100644 --- a/backend/internal/service/openai_ws_fallback_test.go +++ b/backend/internal/service/openai_ws_fallback_test.go @@ -1,6 +1,7 @@ package service import ( + "context" "errors" "net/http" "testing" @@ -26,6 +27,47 @@ func TestClassifyOpenAIWSAcquireError(t *testing.T) { }) } +func TestClassifyOpenAIWSDialError(t *testing.T) { + 
t.Run("handshake_not_finished", func(t *testing.T) { + err := &openAIWSDialError{ + StatusCode: http.StatusBadGateway, + Err: errors.New("WebSocket protocol error: Handshake not finished"), + } + require.Equal(t, "handshake_not_finished", classifyOpenAIWSDialError(err)) + }) + + t.Run("context_deadline", func(t *testing.T) { + err := &openAIWSDialError{ + StatusCode: 0, + Err: context.DeadlineExceeded, + } + require.Equal(t, "ctx_deadline_exceeded", classifyOpenAIWSDialError(err)) + }) +} + +func TestSummarizeOpenAIWSDialError(t *testing.T) { + err := &openAIWSDialError{ + StatusCode: http.StatusBadGateway, + ResponseHeaders: http.Header{ + "Server": []string{"cloudflare"}, + "Via": []string{"1.1 example"}, + "Cf-Ray": []string{"abcd1234"}, + "X-Request-Id": []string{"req_123"}, + }, + Err: errors.New("WebSocket protocol error: Handshake not finished"), + } + + status, class, closeStatus, closeReason, server, via, cfRay, reqID := summarizeOpenAIWSDialError(err) + require.Equal(t, http.StatusBadGateway, status) + require.Equal(t, "handshake_not_finished", class) + require.Equal(t, "-", closeStatus) + require.Equal(t, "-", closeReason) + require.Equal(t, "cloudflare", server) + require.Equal(t, "1.1 example", via) + require.Equal(t, "abcd1234", cfRay) + require.Equal(t, "req_123", reqID) +} + func TestClassifyOpenAIWSErrorEvent(t *testing.T) { reason, recoverable := classifyOpenAIWSErrorEvent([]byte(`{"type":"error","error":{"code":"upgrade_required","message":"Upgrade required"}}`)) require.Equal(t, "upgrade_required", reason) diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index ca81dec1c..957b8c1c8 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -572,6 +572,100 @@ func summarizeOpenAIWSReadCloseError(err error) (status string, reason string) { return normalizeOpenAIWSLogValue(closeStatus), closeReason } +func 
unwrapOpenAIWSDialBaseError(err error) error { + if err == nil { + return nil + } + var dialErr *openAIWSDialError + if errors.As(err, &dialErr) && dialErr != nil && dialErr.Err != nil { + return dialErr.Err + } + return err +} + +func openAIWSDialRespHeaderForLog(err error, key string) string { + var dialErr *openAIWSDialError + if !errors.As(err, &dialErr) || dialErr == nil || dialErr.ResponseHeaders == nil { + return "-" + } + return truncateOpenAIWSLogValue(dialErr.ResponseHeaders.Get(key), openAIWSHeaderValueMaxLen) +} + +func classifyOpenAIWSDialError(err error) string { + if err == nil { + return "-" + } + baseErr := unwrapOpenAIWSDialBaseError(err) + if baseErr == nil { + return "-" + } + if errors.Is(baseErr, context.DeadlineExceeded) { + return "ctx_deadline_exceeded" + } + if errors.Is(baseErr, context.Canceled) { + return "ctx_canceled" + } + var netErr net.Error + if errors.As(baseErr, &netErr) && netErr.Timeout() { + return "net_timeout" + } + if status := coderws.CloseStatus(baseErr); status != -1 { + return normalizeOpenAIWSLogValue(fmt.Sprintf("ws_close_%d", int(status))) + } + message := strings.ToLower(strings.TrimSpace(baseErr.Error())) + switch { + case strings.Contains(message, "handshake not finished"): + return "handshake_not_finished" + case strings.Contains(message, "bad handshake"): + return "bad_handshake" + case strings.Contains(message, "connection refused"): + return "connection_refused" + case strings.Contains(message, "no such host"): + return "dns_not_found" + case strings.Contains(message, "tls"): + return "tls_error" + case strings.Contains(message, "i/o timeout"): + return "io_timeout" + case strings.Contains(message, "context deadline exceeded"): + return "ctx_deadline_exceeded" + default: + return "dial_error" + } +} + +func summarizeOpenAIWSDialError(err error) ( + statusCode int, + dialClass string, + closeStatus string, + closeReason string, + respServer string, + respVia string, + respCFRay string, + respRequestID string, 
+) { + dialClass = "-" + closeStatus = "-" + closeReason = "-" + respServer = "-" + respVia = "-" + respCFRay = "-" + respRequestID = "-" + if err == nil { + return + } + var dialErr *openAIWSDialError + if errors.As(err, &dialErr) && dialErr != nil { + statusCode = dialErr.StatusCode + respServer = openAIWSDialRespHeaderForLog(err, "server") + respVia = openAIWSDialRespHeaderForLog(err, "via") + respCFRay = openAIWSDialRespHeaderForLog(err, "cf-ray") + respRequestID = openAIWSDialRespHeaderForLog(err, "x-request-id") + } + dialClass = normalizeOpenAIWSLogValue(classifyOpenAIWSDialError(err)) + closeStatus, closeReason = summarizeOpenAIWSReadCloseError(unwrapOpenAIWSDialBaseError(err)) + return +} + func isOpenAIWSClientDisconnectError(err error) bool { if err == nil { return false @@ -1150,20 +1244,27 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( }(), }) if err != nil { - dialStatus := 0 - var dialErr *openAIWSDialError - if errors.As(err, &dialErr) { - dialStatus = dialErr.StatusCode - } + dialStatus, dialClass, dialCloseStatus, dialCloseReason, dialRespServer, dialRespVia, dialRespCFRay, dialRespReqID := summarizeOpenAIWSDialError(err) logOpenAIWSModeInfo( - "acquire_fail account_id=%d account_type=%s transport=%s reason=%s dial_status=%d cause=%s preferred_conn_id=%s", + "acquire_fail account_id=%d account_type=%s transport=%s reason=%s dial_status=%d dial_class=%s dial_close_status=%s dial_close_reason=%s dial_resp_server=%s dial_resp_via=%s dial_resp_cf_ray=%s dial_resp_x_request_id=%s cause=%s preferred_conn_id=%s force_new_conn=%v ws_host=%s ws_path=%s proxy_enabled=%v", account.ID, account.Type, normalizeOpenAIWSLogValue(string(decision.Transport)), normalizeOpenAIWSLogValue(classifyOpenAIWSAcquireError(err)), dialStatus, + dialClass, + dialCloseStatus, + truncateOpenAIWSLogValue(dialCloseReason, openAIWSHeaderValueMaxLen), + dialRespServer, + dialRespVia, + dialRespCFRay, + dialRespReqID, truncateOpenAIWSLogValue(err.Error(), 
openAIWSLogValueMaxLen), truncateOpenAIWSLogValue(preferredConnID, openAIWSIDValueMaxLen), + forceNewConn, + wsHost, + wsPath, + account.ProxyID != nil && account.Proxy != nil, ) return nil, wrapOpenAIWSFallback(classifyOpenAIWSAcquireError(err), err) } @@ -1775,6 +1876,26 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( lease, acquireErr := pool.Acquire(acquireCtx, req) acquireCancel() if acquireErr != nil { + dialStatus, dialClass, dialCloseStatus, dialCloseReason, dialRespServer, dialRespVia, dialRespCFRay, dialRespReqID := summarizeOpenAIWSDialError(acquireErr) + logOpenAIWSModeInfo( + "ingress_ws_upstream_acquire_fail account_id=%d turn=%d reason=%s dial_status=%d dial_class=%s dial_close_status=%s dial_close_reason=%s dial_resp_server=%s dial_resp_via=%s dial_resp_cf_ray=%s dial_resp_x_request_id=%s cause=%s preferred_conn_id=%s ws_host=%s ws_path=%s proxy_enabled=%v", + account.ID, + turn, + normalizeOpenAIWSLogValue(classifyOpenAIWSAcquireError(acquireErr)), + dialStatus, + dialClass, + dialCloseStatus, + truncateOpenAIWSLogValue(dialCloseReason, openAIWSHeaderValueMaxLen), + dialRespServer, + dialRespVia, + dialRespCFRay, + dialRespReqID, + truncateOpenAIWSLogValue(acquireErr.Error(), openAIWSLogValueMaxLen), + truncateOpenAIWSLogValue(preferred, openAIWSIDValueMaxLen), + wsHost, + wsPath, + account.ProxyID != nil && account.Proxy != nil, + ) if errors.Is(acquireErr, context.DeadlineExceeded) || errors.Is(acquireErr, errOpenAIWSConnQueueFull) { return nil, NewOpenAIWSClientCloseError( coderws.StatusTryAgainLater, diff --git a/backend/internal/service/openai_ws_pool.go b/backend/internal/service/openai_ws_pool.go index e6c0a6178..85d81bae2 100644 --- a/backend/internal/service/openai_ws_pool.go +++ b/backend/internal/service/openai_ws_pool.go @@ -32,8 +32,9 @@ var ( ) type openAIWSDialError struct { - StatusCode int - Err error + StatusCode int + ResponseHeaders http.Header + Err error } func (e *openAIWSDialError) Error() string { @@ 
-1085,10 +1086,18 @@ func (p *openAIWSConnPool) dialConn(ctx context.Context, req openAIWSAcquireRequ } conn, status, handshakeHeaders, err := p.clientDialer.Dial(ctx, req.WSURL, req.Headers, req.ProxyURL) if err != nil { - return nil, &openAIWSDialError{StatusCode: status, Err: err} + return nil, &openAIWSDialError{ + StatusCode: status, + ResponseHeaders: cloneHeader(handshakeHeaders), + Err: err, + } } if conn == nil { - return nil, &openAIWSDialError{StatusCode: status, Err: errors.New("openai ws dialer returned nil connection")} + return nil, &openAIWSDialError{ + StatusCode: status, + ResponseHeaders: cloneHeader(handshakeHeaders), + Err: errors.New("openai ws dialer returned nil connection"), + } } id := p.nextConnID(req.Account.ID) return newOpenAIWSConn(id, req.Account.ID, conn, handshakeHeaders), nil diff --git a/backend/migrations/001_init.sql b/backend/migrations/001_init.sql index 0dae3055f..64078c42d 100644 --- a/backend/migrations/001_init.sql +++ b/backend/migrations/001_init.sql @@ -158,7 +158,6 @@ CREATE TABLE IF NOT EXISTS usage_logs ( actual_cost DECIMAL(20, 10) NOT NULL DEFAULT 0, -- 实际扣除费用 -- 元数据 - request_type SMALLINT NOT NULL DEFAULT 0 CHECK (request_type IN (0, 1, 2, 3)), stream BOOLEAN NOT NULL DEFAULT FALSE, duration_ms INT, @@ -171,4 +170,3 @@ CREATE INDEX IF NOT EXISTS idx_usage_logs_account_id ON usage_logs(account_id); CREATE INDEX IF NOT EXISTS idx_usage_logs_model ON usage_logs(model); CREATE INDEX IF NOT EXISTS idx_usage_logs_created_at ON usage_logs(created_at); CREATE INDEX IF NOT EXISTS idx_usage_logs_user_created ON usage_logs(user_id, created_at); -CREATE INDEX IF NOT EXISTS idx_usage_logs_request_type_created_at ON usage_logs(request_type, created_at); From 71df29222ede804917b858a467d58417feca1ef8 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 16:50:52 +0800 Subject: [PATCH 021/120] =?UTF-8?q?feat(openai):=20=E4=BC=98=E5=8C=96=20WS?= =?UTF-8?q?=20v2=20=E6=80=A7=E8=83=BD=E4=B8=8E=E7=A8=B3=E5=AE=9A=E6=80=A7?= 
=?UTF-8?q?=E5=B9=B6=E8=A1=A5=E5=85=85=E9=AB=98=E8=A6=86=E7=9B=96=E6=B5=8B?= =?UTF-8?q?=E8=AF=95?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 修复 WS error 事件连接复用污染风险,统一标记连接为 broken - 写超时继承父 context,降低客户端断连后的资源占用 - 增加连接池后台心跳/清理机制并拆分读写锁,提升连接健康性 - 增强协议决策与状态存储超时策略,补齐告警日志 - 下调消息读上限并统一并发参数来源,修复配置不一致 - 新增与强化 forwarder/pool/state_store/protocol/handler 单元测试 --- backend/cmd/server/VERSION | 2 +- backend/cmd/server/wire.go | 7 + backend/cmd/server/wire_gen.go | 9 +- .../handler/openai_gateway_handler.go | 8 +- .../service/openai_gateway_service.go | 8 + backend/internal/service/openai_ws_client.go | 3 +- .../internal/service/openai_ws_client_test.go | 16 + .../internal/service/openai_ws_forwarder.go | 122 +++++-- ...penai_ws_forwarder_ingress_session_test.go | 135 +++++++- .../openai_ws_forwarder_ingress_test.go | 2 + .../openai_ws_forwarder_success_test.go | 83 ++++- backend/internal/service/openai_ws_pool.go | 187 ++++++++++- .../internal/service/openai_ws_pool_test.go | 304 ++++++++++++++++++ .../service/openai_ws_protocol_resolver.go | 15 +- .../openai_ws_protocol_resolver_test.go | 28 ++ .../internal/service/openai_ws_state_store.go | 20 +- .../service/openai_ws_state_store_test.go | 79 +++++ 17 files changed, 967 insertions(+), 61 deletions(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index a6b531056..44cd42b3b 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.85.3 +0.1.85.4 diff --git a/backend/cmd/server/wire.go b/backend/cmd/server/wire.go index 1ba6b1848..f448cd730 100644 --- a/backend/cmd/server/wire.go +++ b/backend/cmd/server/wire.go @@ -84,6 +84,7 @@ func provideCleanup( openaiOAuth *service.OpenAIOAuthService, geminiOAuth *service.GeminiOAuthService, antigravityOAuth *service.AntigravityOAuthService, + openAIGateway *service.OpenAIGatewayService, ) func() { return func() { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ 
-206,6 +207,12 @@ func provideCleanup( antigravityOAuth.Stop() return nil }}, + {"OpenAIWSPool", func() error { + if openAIGateway != nil { + openAIGateway.CloseOpenAIWSPool() + } + return nil + }}, {"Redis", func() error { return rdb.Close() }}, diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index cc8670b2b..045fec477 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -212,7 +212,7 @@ func initializeApplication(buildInfo handler.BuildInfo) (*Application, error) { tokenRefreshService := service.ProvideTokenRefreshService(accountRepository, soraAccountRepository, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, compositeTokenCacheInvalidator, schedulerCache, configConfig) accountExpiryService := service.ProvideAccountExpiryService(accountRepository) subscriptionExpiryService := service.ProvideSubscriptionExpiryService(userSubscriptionRepository) - v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, soraMediaCleanupService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService) + v := provideCleanup(client, redisClient, opsMetricsCollector, opsAggregationService, opsAlertEvaluatorService, opsCleanupService, opsScheduledReportService, opsSystemLogSink, soraMediaCleanupService, schedulerSnapshotService, tokenRefreshService, accountExpiryService, subscriptionExpiryService, usageCleanupService, idempotencyCleanupService, pricingService, emailQueueService, billingCacheService, usageRecordWorkerPool, subscriptionService, oAuthService, openAIOAuthService, geminiOAuthService, antigravityOAuthService, 
openAIGatewayService) application := &Application{ Server: httpServer, Cleanup: v, @@ -259,6 +259,7 @@ func provideCleanup( openaiOAuth *service.OpenAIOAuthService, geminiOAuth *service.GeminiOAuthService, antigravityOAuth *service.AntigravityOAuthService, + openAIGateway *service.OpenAIGatewayService, ) func() { return func() { ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) @@ -380,6 +381,12 @@ func provideCleanup( antigravityOAuth.Stop() return nil }}, + {"OpenAIWSPool", func() error { + if openAIGateway != nil { + openAIGateway.CloseOpenAIWSPool() + } + return nil + }}, {"Redis", func() error { return rdb.Close() }}, diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index e7583c04d..1d16d9ffb 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -549,7 +549,7 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { defer func() { _ = wsConn.CloseNow() }() - wsConn.SetReadLimit(128 * 1024 * 1024) + wsConn.SetReadLimit(16 * 1024 * 1024) ctx := c.Request.Context() readCtx, cancel := context.WithTimeout(ctx, 30*time.Second) @@ -643,6 +643,10 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { } account := selection.Account + accountMaxConcurrency := account.Concurrency + if selection.WaitPlan != nil && selection.WaitPlan.MaxConcurrency > 0 { + accountMaxConcurrency = selection.WaitPlan.MaxConcurrency + } accountReleaseFunc := selection.ReleaseFunc if !selection.Acquired { if selection.WaitPlan == nil { @@ -699,7 +703,7 @@ func (h *OpenAIGatewayHandler) ResponsesWebSocket(c *gin.Context) { if !userAcquired { return service.NewOpenAIWSClientCloseError(coderws.StatusTryAgainLater, "too many concurrent requests, please retry later", nil) } - accountReleaseFunc, accountAcquired, err := h.concurrencyHelper.TryAcquireAccountSlot(ctx, account.ID, account.Concurrency) + 
accountReleaseFunc, accountAcquired, err := h.concurrencyHelper.TryAcquireAccountSlot(ctx, account.ID, accountMaxConcurrency) if err != nil { if userReleaseFunc != nil { userReleaseFunc() diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index f9551a4b0..ffa2510e5 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -300,6 +300,14 @@ func NewOpenAIGatewayService( return svc } +// CloseOpenAIWSPool 关闭 OpenAI WebSocket 连接池的后台 worker 和空闲连接。 +// 应在应用优雅关闭时调用。 +func (s *OpenAIGatewayService) CloseOpenAIWSPool() { + if s != nil && s.openaiWSPool != nil { + s.openaiWSPool.Close() + } +} + func (s *OpenAIGatewayService) logOpenAIWSModeBootstrap() { if s == nil || s.cfg == nil { return diff --git a/backend/internal/service/openai_ws_client.go b/backend/internal/service/openai_ws_client.go index 7d2c7dd55..9f3c47b7b 100644 --- a/backend/internal/service/openai_ws_client.go +++ b/backend/internal/service/openai_ws_client.go @@ -15,7 +15,7 @@ import ( "github.com/coder/websocket/wsjson" ) -const openAIWSMessageReadLimitBytes int64 = 128 * 1024 * 1024 +const openAIWSMessageReadLimitBytes int64 = 16 * 1024 * 1024 const ( openAIWSProxyTransportMaxIdleConns = 128 openAIWSProxyTransportMaxIdleConnsPerHost = 64 @@ -135,6 +135,7 @@ func (d *coderOpenAIWSClientDialer) proxyHTTPClient(proxy string) (*http.Client, MaxIdleConns: openAIWSProxyTransportMaxIdleConns, MaxIdleConnsPerHost: openAIWSProxyTransportMaxIdleConnsPerHost, IdleConnTimeout: openAIWSProxyTransportIdleConnTimeout, + TLSHandshakeTimeout: 10 * time.Second, ForceAttemptHTTP2: true, } client := &http.Client{Transport: transport} diff --git a/backend/internal/service/openai_ws_client_test.go b/backend/internal/service/openai_ws_client_test.go index e78c86284..a88d62665 100644 --- a/backend/internal/service/openai_ws_client_test.go +++ b/backend/internal/service/openai_ws_client_test.go @@ 
-2,6 +2,7 @@ package service import ( "fmt" + "net/http" "testing" "time" @@ -94,3 +95,18 @@ func TestCoderOpenAIWSClientDialer_ProxyClientCacheIdleTTL(t *testing.T) { require.False(t, exists, "超过空闲 TTL 的代理客户端应被回收") } + +func TestCoderOpenAIWSClientDialer_ProxyTransportTLSHandshakeTimeout(t *testing.T) { + dialer := newDefaultOpenAIWSClientDialer() + impl, ok := dialer.(*coderOpenAIWSClientDialer) + require.True(t, ok) + + client, err := impl.proxyHTTPClient("http://127.0.0.1:38080") + require.NoError(t, err) + require.NotNil(t, client) + + transport, ok := client.Transport.(*http.Transport) + require.True(t, ok) + require.NotNil(t, transport) + require.Equal(t, 10*time.Second, transport.TLSHandshakeTimeout) +} diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index 957b8c1c8..0b017552c 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -57,6 +57,13 @@ const ( openAIWSStoreDisabledConnModeOff = "off" ) +var openAIWSLogValueReplacer = strings.NewReplacer( + "error", "err", + "fallback", "fb", + "warning", "warnx", + "failed", "fail", +) + // openAIWSFallbackError 表示可安全回退到 HTTP 的 WS 错误(尚未写下游)。 type openAIWSFallbackError struct { Reason string @@ -142,13 +149,7 @@ func normalizeOpenAIWSLogValue(value string) string { if trimmed == "" { return "-" } - replacer := strings.NewReplacer( - "error", "err", - "fallback", "fb", - "warning", "warnx", - "failed", "fail", - ) - return replacer.Replace(trimmed) + return openAIWSLogValueReplacer.Replace(trimmed) } func truncateOpenAIWSLogValue(value string, maxLen int) string { @@ -268,6 +269,10 @@ func openAIWSEventMayContainToolCalls(eventType string) bool { } } +func openAIWSEventShouldParseUsage(eventType string) bool { + return strings.TrimSpace(eventType) == "response.completed" +} + func summarizeOpenAIWSErrorEventFields(message []byte) (code string, errType string, errMessage string) { if 
len(message) == 0 { return "-", "-", "-" @@ -552,6 +557,19 @@ func logOpenAIWSModeDebug(format string, args ...any) { logger.LegacyPrintf("service.openai_gateway", "[debug] [OpenAI WS Mode][openai_ws_mode=true] "+format, args...) } +func logOpenAIWSBindResponseAccountWarn(groupID, accountID int64, responseID string, err error) { + if err == nil { + return + } + logger.L().Warn( + "openai.ws_bind_response_account_failed", + zap.Int64("group_id", groupID), + zap.Int64("account_id", accountID), + zap.String("response_id", truncateOpenAIWSLogValue(responseID, openAIWSIDValueMaxLen)), + zap.Error(err), + ) +} + func summarizeOpenAIWSReadCloseError(err error) (status string, reason string) { if err == nil { return "-", "-" @@ -683,7 +701,9 @@ func isOpenAIWSClientDisconnectError(err error) bool { } return strings.Contains(message, "failed to read frame header: eof") || strings.Contains(message, "unexpected eof") || - strings.Contains(message, "use of closed network connection") + strings.Contains(message, "use of closed network connection") || + strings.Contains(message, "connection reset by peer") || + strings.Contains(message, "broken pipe") } func classifyOpenAIWSReadFallbackReason(err error) string { @@ -1321,7 +1341,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( return nil, err } - if err := lease.WriteJSON(payload, s.openAIWSWriteTimeout()); err != nil { + if err := lease.WriteJSONWithContextTimeout(ctx, payload, s.openAIWSWriteTimeout()); err != nil { lease.MarkBroken() logOpenAIWSModeInfo( "write_request_fail account_id=%d conn_id=%s cause=%s payload_bytes=%d", @@ -1509,15 +1529,19 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( ) } - if needModelReplace && openAIWSEventMayContainModel(eventType) { - message = replaceOpenAIWSMessageModel(message, mappedModel, originalModel) - } - if openAIWSEventMayContainToolCalls(eventType) { - if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(message); changed { - message = corrected + if 
!clientDisconnected { + if needModelReplace && openAIWSEventMayContainModel(eventType) { + message = replaceOpenAIWSMessageModel(message, mappedModel, originalModel) + } + if openAIWSEventMayContainToolCalls(eventType) { + if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(message); changed { + message = corrected + } } } - s.parseSSEUsageBytes(message, usage) + if openAIWSEventShouldParseUsage(eventType) { + s.parseSSEUsageBytes(message, usage) + } if eventType == "error" { errMsg := strings.TrimSpace(gjson.GetBytes(message, "error.message").String()) @@ -1537,9 +1561,9 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( errType, errMessage, ) + // error 事件后连接不再可复用,避免回池后污染下一请求。 + lease.MarkBroken() if !wroteDownstream && canFallback { - // 避免复用“已返回 error 且可能被上游关闭”的连接,防止下一轮重试空转 read_fail。 - lease.MarkBroken() return nil, wrapOpenAIWSFallback(fallbackReason, errors.New(errMsg)) } statusCode := openAIWSErrorHTTPStatus(message) @@ -1634,7 +1658,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( if responseID != "" && stateStore != nil { ttl := s.openAIWSResponseStickyTTL() - _ = stateStore.BindResponseAccount(ctx, groupID, responseID, account.ID, ttl) + logOpenAIWSBindResponseAccountWarn(groupID, account.ID, responseID, stateStore.BindResponseAccount(ctx, groupID, responseID, account.ID, ttl)) stateStore.BindResponseConn(responseID, lease.ConnID(), ttl) } if stateStore != nil && storeDisabled && sessionHash != "" { @@ -1957,7 +1981,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return nil, errors.New("upstream websocket lease is nil") } turnStart := time.Now() - if err := lease.WriteJSON(payload, s.openAIWSWriteTimeout()); err != nil { + if err := lease.WriteJSONWithContextTimeout(ctx, payload, s.openAIWSWriteTimeout()); err != nil { return nil, fmt.Errorf("write upstream websocket request: %w", err) } logOpenAIWSModeInfo( @@ -1978,6 +2002,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( 
firstEventType := "" lastEventType := "" needModelReplace := false + clientDisconnected := false mappedModel := "" if originalModel != "" { mappedModel = account.GetMappedModel(originalModel) @@ -2019,26 +2044,47 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( ms := int(time.Since(turnStart).Milliseconds()) firstTokenMs = &ms } - s.parseSSEUsageBytes(upstreamMessage, &usage) - - if needModelReplace && openAIWSEventMayContainModel(eventType) { - upstreamMessage = replaceOpenAIWSMessageModel(upstreamMessage, mappedModel, originalModel) + if openAIWSEventShouldParseUsage(eventType) { + s.parseSSEUsageBytes(upstreamMessage, &usage) } - if openAIWSEventMayContainToolCalls(eventType) { - if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(upstreamMessage); changed { - upstreamMessage = corrected + + if !clientDisconnected { + if needModelReplace && openAIWSEventMayContainModel(eventType) { + upstreamMessage = replaceOpenAIWSMessageModel(upstreamMessage, mappedModel, originalModel) + } + if openAIWSEventMayContainToolCalls(eventType) { + if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(upstreamMessage); changed { + upstreamMessage = corrected + } + } + if err := writeClientMessage(upstreamMessage); err != nil { + if isOpenAIWSClientDisconnectError(err) { + clientDisconnected = true + closeStatus, closeReason := summarizeOpenAIWSReadCloseError(err) + logOpenAIWSModeInfo( + "ingress_ws_client_disconnected_drain account_id=%d turn=%d conn_id=%s close_status=%s close_reason=%s", + account.ID, + turn, + truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), + closeStatus, + truncateOpenAIWSLogValue(closeReason, openAIWSHeaderValueMaxLen), + ) + } else { + return nil, fmt.Errorf("write client websocket event: %w", err) + } } - } - if err := writeClientMessage(upstreamMessage); err != nil { - return nil, fmt.Errorf("write client websocket event: %w", err) } if isTerminalEvent { + // 客户端已断连时,上游连接的 session 状态不可信,标记 
broken 避免回池复用。 + if clientDisconnected { + lease.MarkBroken() + } firstTokenMsValue := -1 if firstTokenMs != nil { firstTokenMsValue = *firstTokenMs } logOpenAIWSModeInfo( - "ingress_ws_turn_completed account_id=%d turn=%d conn_id=%s response_id=%s duration_ms=%d events=%d token_events=%d terminal_events=%d first_event=%s last_event=%s first_token_ms=%d", + "ingress_ws_turn_completed account_id=%d turn=%d conn_id=%s response_id=%s duration_ms=%d events=%d token_events=%d terminal_events=%d first_event=%s last_event=%s first_token_ms=%d client_disconnected=%v", account.ID, turn, truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), @@ -2050,6 +2096,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( truncateOpenAIWSLogValue(firstEventType, openAIWSLogValueMaxLen), truncateOpenAIWSLogValue(lastEventType, openAIWSLogValueMaxLen), firstTokenMsValue, + clientDisconnected, ) return &OpenAIForwardResult{ RequestID: responseID, @@ -2117,7 +2164,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( if responseID != "" && stateStore != nil { ttl := s.openAIWSResponseStickyTTL() - _ = stateStore.BindResponseAccount(ctx, groupID, responseID, account.ID, ttl) + logOpenAIWSBindResponseAccountWarn(groupID, account.ID, responseID, stateStore.BindResponseAccount(ctx, groupID, responseID, account.ID, ttl)) stateStore.BindResponseConn(responseID, connID, ttl) } if stateStore != nil && storeDisabled && sessionHash != "" { @@ -2241,7 +2288,7 @@ func (s *OpenAIGatewayService) performOpenAIWSGeneratePrewarm( prewarmPayload["generate"] = false prewarmPayloadJSON := payloadAsJSONBytes(prewarmPayload) - if err := lease.WriteJSON(prewarmPayload, s.openAIWSWriteTimeout()); err != nil { + if err := lease.WriteJSONWithContextTimeout(ctx, prewarmPayload, s.openAIWSWriteTimeout()); err != nil { lease.MarkBroken() logOpenAIWSModeInfo( "prewarm_write_fail account_id=%d conn_id=%s cause=%s", @@ -2326,7 +2373,7 @@ func (s *OpenAIGatewayService) 
performOpenAIWSGeneratePrewarm( lease.MarkPrewarmed() if prewarmResponseID != "" && stateStore != nil { ttl := s.openAIWSResponseStickyTTL() - _ = stateStore.BindResponseAccount(ctx, groupID, prewarmResponseID, account.ID, ttl) + logOpenAIWSBindResponseAccountWarn(groupID, account.ID, prewarmResponseID, stateStore.BindResponseAccount(ctx, groupID, prewarmResponseID, account.ID, ttl)) stateStore.BindResponseConn(prewarmResponseID, lease.ConnID(), ttl) } logOpenAIWSModeInfo( @@ -2489,7 +2536,12 @@ func (s *OpenAIGatewayService) SelectAccountByPreviousResponseID( result, acquireErr := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) if acquireErr == nil && result.Acquired { - _ = store.BindResponseAccount(ctx, derefGroupID(groupID), responseID, accountID, s.openAIWSResponseStickyTTL()) + logOpenAIWSBindResponseAccountWarn( + derefGroupID(groupID), + accountID, + responseID, + store.BindResponseAccount(ctx, derefGroupID(groupID), responseID, accountID, s.openAIWSResponseStickyTTL()), + ) return &AccountSelectionResult{ Account: account, Acquired: true, diff --git a/backend/internal/service/openai_ws_forwarder_ingress_session_test.go b/backend/internal/service/openai_ws_forwarder_ingress_session_test.go index d1aa7093c..a2d456bb0 100644 --- a/backend/internal/service/openai_ws_forwarder_ingress_session_test.go +++ b/backend/internal/service/openai_ws_forwarder_ingress_session_test.go @@ -94,7 +94,7 @@ func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_KeepLeaseAcrossT ginCtx, _ := gin.CreateTestContext(rec) req := r.Clone(r.Context()) req.Header = req.Header.Clone() - req.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + req.Header.Set("User-Agent", "unit-test-agent/1.0") ginCtx.Request = req readCtx, cancel := context.WithTimeout(r.Context(), 3*time.Second) @@ -161,3 +161,136 @@ func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_KeepLeaseAcrossT require.Equal(t, 1, captureDialer.DialCount(), "同一 ingress 会话应保持同一上游连接") require.Len(t, 
captureConn.writes, 2, "应向同一上游连接发送两轮 response.create") } + +func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_ClientDisconnectStillDrainsUpstream(t *testing.T) { + gin.SetMode(gin.TestMode) + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + cfg.Gateway.OpenAIWS.QueueLimitPerConn = 8 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.ReadTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.WriteTimeoutSeconds = 3 + + // 多个上游事件:前几个为非 terminal 事件,最后一个为 terminal。 + // 第一个事件延迟 250ms 让客户端 RST 有时间传播,使 writeClientMessage 可靠失败。 + captureConn := &openAIWSCaptureConn{ + readDelays: []time.Duration{250 * time.Millisecond, 0, 0}, + events: [][]byte{ + []byte(`{"type":"response.created","response":{"id":"resp_ingress_disconnect","model":"gpt-5.1"}}`), + []byte(`{"type":"response.output_item.added","response":{"id":"resp_ingress_disconnect"}}`), + []byte(`{"type":"response.completed","response":{"id":"resp_ingress_disconnect","model":"gpt-5.1","usage":{"input_tokens":2,"output_tokens":1}}}`), + }, + } + captureDialer := &openAIWSCaptureDialer{conn: captureConn} + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(captureDialer) + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + openaiWSPool: pool, + } + + account := &Account{ + ID: 115, + Name: "openai-ingress-client-disconnect", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + 
Credentials: map[string]any{ + "api_key": "sk-test", + "model_mapping": map[string]any{ + "custom-original-model": "gpt-5.1", + }, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + serverErrCh := make(chan error, 1) + resultCh := make(chan *OpenAIForwardResult, 1) + hooks := &OpenAIWSIngressHooks{ + AfterTurn: func(_ int, result *OpenAIForwardResult, turnErr error) { + if turnErr == nil && result != nil { + resultCh <- result + } + }, + } + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, err := coderws.Accept(w, r, &coderws.AcceptOptions{ + CompressionMode: coderws.CompressionContextTakeover, + }) + if err != nil { + serverErrCh <- err + return + } + defer func() { + _ = conn.CloseNow() + }() + + rec := httptest.NewRecorder() + ginCtx, _ := gin.CreateTestContext(rec) + req := r.Clone(r.Context()) + req.Header = req.Header.Clone() + req.Header.Set("User-Agent", "unit-test-agent/1.0") + ginCtx.Request = req + + readCtx, cancel := context.WithTimeout(r.Context(), 3*time.Second) + msgType, firstMessage, readErr := conn.Read(readCtx) + cancel() + if readErr != nil { + serverErrCh <- readErr + return + } + if msgType != coderws.MessageText && msgType != coderws.MessageBinary { + serverErrCh <- errors.New("unsupported websocket client message type") + return + } + + serverErrCh <- svc.ProxyResponsesWebSocketFromClient(r.Context(), ginCtx, conn, account, "sk-test", firstMessage, hooks) + })) + defer wsServer.Close() + + dialCtx, cancelDial := context.WithTimeout(context.Background(), 3*time.Second) + clientConn, _, err := coderws.Dial(dialCtx, "ws"+strings.TrimPrefix(wsServer.URL, "http"), nil) + cancelDial() + require.NoError(t, err) + + writeCtx, cancelWrite := context.WithTimeout(context.Background(), 3*time.Second) + err = clientConn.Write(writeCtx, coderws.MessageText, []byte(`{"type":"response.create","model":"custom-original-model","stream":false}`)) + cancelWrite() + 
require.NoError(t, err) + // 立即关闭客户端,模拟客户端在 relay 期间断连。 + require.NoError(t, clientConn.CloseNow(), "模拟 ingress 客户端提前断连") + + select { + case serverErr := <-serverErrCh: + require.NoError(t, serverErr, "客户端断连后应继续 drain 上游直到 terminal 或正常结束") + case <-time.After(5 * time.Second): + t.Fatal("等待 ingress websocket 结束超时") + } + + select { + case result := <-resultCh: + require.Equal(t, "resp_ingress_disconnect", result.RequestID) + require.Equal(t, 2, result.Usage.InputTokens) + require.Equal(t, 1, result.Usage.OutputTokens) + case <-time.After(2 * time.Second): + t.Fatal("未收到断连后的 turn 结果回调") + } +} diff --git a/backend/internal/service/openai_ws_forwarder_ingress_test.go b/backend/internal/service/openai_ws_forwarder_ingress_test.go index bb5761878..8760cbdc4 100644 --- a/backend/internal/service/openai_ws_forwarder_ingress_test.go +++ b/backend/internal/service/openai_ws_forwarder_ingress_test.go @@ -29,6 +29,8 @@ func TestIsOpenAIWSClientDisconnectError(t *testing.T) { {name: "ws_abnormal_1006", err: coderws.CloseError{Code: coderws.StatusAbnormalClosure}, want: true}, {name: "ws_policy_violation", err: coderws.CloseError{Code: coderws.StatusPolicyViolation}, want: false}, {name: "wrapped_eof_message", err: errors.New("failed to get reader: failed to read frame header: EOF"), want: true}, + {name: "connection_reset_by_peer", err: errors.New("failed to read frame header: read tcp 127.0.0.1:1234->127.0.0.1:5678: read: connection reset by peer"), want: true}, + {name: "broken_pipe", err: errors.New("write tcp 127.0.0.1:1234->127.0.0.1:5678: write: broken pipe"), want: true}, } for _, tt := range tests { diff --git a/backend/internal/service/openai_ws_forwarder_success_test.go b/backend/internal/service/openai_ws_forwarder_success_test.go index e41bfec61..516801588 100644 --- a/backend/internal/service/openai_ws_forwarder_success_test.go +++ b/backend/internal/service/openai_ws_forwarder_success_test.go @@ -3,6 +3,7 @@ package service import ( "context" "encoding/json" + 
"errors" "io" "net/http" "net/http/httptest" @@ -88,7 +89,7 @@ func TestOpenAIGatewayService_Forward_WSv2_SuccessAndBindSticky(t *testing.T) { rec := httptest.NewRecorder() c, _ := gin.CreateTestContext(rec) c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) - c.Request.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + c.Request.Header.Set("User-Agent", "unit-test-agent/1.0") groupID := int64(1001) c.Set("api_key", &APIKey{GroupID: &groupID}) @@ -180,6 +181,86 @@ func requestToJSONString(payload map[string]any) string { return string(b) } +func TestLogOpenAIWSBindResponseAccountWarn(t *testing.T) { + require.NotPanics(t, func() { + logOpenAIWSBindResponseAccountWarn(1, 2, "resp_ok", nil) + }) + require.NotPanics(t, func() { + logOpenAIWSBindResponseAccountWarn(1, 2, "resp_err", errors.New("bind failed")) + }) +} + +func TestOpenAIGatewayService_Forward_WSv2_RewriteModelAndToolCallsOnCompletedEvent(t *testing.T) { + gin.SetMode(gin.TestMode) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + c.Request.Header.Set("User-Agent", "codex_cli_rs/0.98.0") + groupID := int64(3001) + c.Set("api_key", &APIKey{GroupID: &groupID}) + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + cfg.Gateway.OpenAIWS.QueueLimitPerConn = 8 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.ReadTimeoutSeconds = 5 + cfg.Gateway.OpenAIWS.WriteTimeoutSeconds = 3 + + captureConn := &openAIWSCaptureConn{ + events: [][]byte{ + 
[]byte(`{"type":"response.completed","response":{"id":"resp_model_tool_1","model":"gpt-5.1","tool_calls":[{"function":{"name":"apply_patch","arguments":"{\"file_path\":\"/tmp/a.txt\",\"old_string\":\"a\",\"new_string\":\"b\"}"}}],"usage":{"input_tokens":2,"output_tokens":1}},"tool_calls":[{"function":{"name":"apply_patch","arguments":"{\"file_path\":\"/tmp/a.txt\",\"old_string\":\"a\",\"new_string\":\"b\"}"}}]}`), + }, + } + captureDialer := &openAIWSCaptureDialer{conn: captureConn} + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(captureDialer) + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + openaiWSPool: pool, + } + + account := &Account{ + ID: 1301, + Name: "openai-rewrite", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + "model_mapping": map[string]any{ + "custom-original-model": "gpt-5.1", + }, + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + body := []byte(`{"model":"custom-original-model","stream":false,"input":[{"type":"input_text","text":"hello"}]}`) + result, err := svc.Forward(context.Background(), c, account, body) + require.NoError(t, err) + require.NotNil(t, result) + require.Equal(t, "resp_model_tool_1", result.RequestID) + require.Equal(t, "custom-original-model", gjson.GetBytes(rec.Body.Bytes(), "model").String(), "响应模型应回写为原始请求模型") + require.Equal(t, "edit", gjson.GetBytes(rec.Body.Bytes(), "tool_calls.0.function.name").String(), "工具名称应被修正为 OpenCode 规范") +} + func TestOpenAIWSPayloadString_OnlyAcceptsStringValues(t *testing.T) { payload := map[string]any{ "type": nil, diff --git a/backend/internal/service/openai_ws_pool.go b/backend/internal/service/openai_ws_pool.go index 85d81bae2..ee8f1256a 100644 --- 
a/backend/internal/service/openai_ws_pool.go +++ b/backend/internal/service/openai_ws_pool.go @@ -21,6 +21,8 @@ const ( openAIWSConnHealthCheckTO = 2 * time.Second openAIWSConnPrewarmExtraDelay = 2 * time.Second openAIWSAcquireCleanupInterval = 3 * time.Second + openAIWSBackgroundPingInterval = 30 * time.Second + openAIWSBackgroundSweepTicker = 30 * time.Second openAIWSPrewarmFailureWindow = 30 * time.Second openAIWSPrewarmFailureSuppress = 2 @@ -127,7 +129,14 @@ func (l *openAIWSConnLease) WriteJSON(value any, timeout time.Duration) error { if l == nil || l.conn == nil { return errOpenAIWSConnClosed } - return l.conn.writeJSONWithTimeout(value, timeout) + return l.conn.writeJSONWithTimeout(context.Background(), value, timeout) +} + +func (l *openAIWSConnLease) WriteJSONWithContextTimeout(ctx context.Context, value any, timeout time.Duration) error { + if l == nil || l.conn == nil { + return errOpenAIWSConnClosed + } + return l.conn.writeJSONWithTimeout(ctx, value, timeout) } func (l *openAIWSConnLease) WriteJSONContext(ctx context.Context, value any) error { @@ -185,7 +194,8 @@ type openAIWSConn struct { closedCh chan struct{} closeOnce sync.Once - ioMu sync.Mutex + readMu sync.Mutex + writeMu sync.Mutex waiters atomic.Int32 createdAtNano atomic.Int64 @@ -280,7 +290,7 @@ func (c *openAIWSConn) close() { }) } -func (c *openAIWSConn) writeJSONWithTimeout(value any, timeout time.Duration) error { +func (c *openAIWSConn) writeJSONWithTimeout(parent context.Context, value any, timeout time.Duration) error { if c == nil { return errOpenAIWSConnClosed } @@ -290,7 +300,10 @@ func (c *openAIWSConn) writeJSONWithTimeout(value any, timeout time.Duration) er default: } - writeCtx := context.Background() + writeCtx := parent + if writeCtx == nil { + writeCtx = context.Background() + } if timeout <= 0 { return c.writeJSON(value, writeCtx) } @@ -301,8 +314,8 @@ func (c *openAIWSConn) writeJSONWithTimeout(value any, timeout time.Duration) er } func (c *openAIWSConn) 
writeJSON(value any, writeCtx context.Context) error { - c.ioMu.Lock() - defer c.ioMu.Unlock() + c.writeMu.Lock() + defer c.writeMu.Unlock() if c.ws == nil { return errOpenAIWSConnClosed } @@ -342,8 +355,8 @@ func (c *openAIWSConn) readMessageWithContextTimeout(parent context.Context, tim } func (c *openAIWSConn) readMessage(readCtx context.Context) ([]byte, error) { - c.ioMu.Lock() - defer c.ioMu.Unlock() + c.readMu.Lock() + defer c.readMu.Unlock() if c.ws == nil { return nil, errOpenAIWSConnClosed } @@ -368,8 +381,8 @@ func (c *openAIWSConn) pingWithTimeout(timeout time.Duration) error { default: } - c.ioMu.Lock() - defer c.ioMu.Unlock() + c.writeMu.Lock() + defer c.writeMu.Unlock() if c.ws == nil { return errOpenAIWSConnClosed } @@ -508,13 +521,19 @@ type openAIWSConnPool struct { seq atomic.Uint64 metrics openAIWSPoolMetrics + + workerStopCh chan struct{} + closeOnce sync.Once } func newOpenAIWSConnPool(cfg *config.Config) *openAIWSConnPool { - return &openAIWSConnPool{ + pool := &openAIWSConnPool{ cfg: cfg, clientDialer: newDefaultOpenAIWSClientDialer(), + workerStopCh: make(chan struct{}), } + pool.startBackgroundWorkers() + return pool } func (p *openAIWSConnPool) SnapshotMetrics() OpenAIWSPoolMetricsSnapshot { @@ -551,6 +570,152 @@ func (p *openAIWSConnPool) setClientDialerForTest(dialer openAIWSClientDialer) { p.clientDialer = dialer } +// Close 停止后台 worker 并关闭所有空闲连接,应在优雅关闭时调用。 +func (p *openAIWSConnPool) Close() { + if p == nil { + return + } + p.closeOnce.Do(func() { + close(p.workerStopCh) + // 遍历所有账户池,关闭全部空闲连接。 + p.accounts.Range(func(key, value any) bool { + ap, ok := value.(*openAIWSAccountPool) + if !ok || ap == nil { + return true + } + ap.mu.Lock() + for _, conn := range ap.conns { + if conn != nil && !conn.isLeased() { + conn.close() + } + } + ap.mu.Unlock() + return true + }) + }) +} + +func (p *openAIWSConnPool) startBackgroundWorkers() { + if p == nil || p.workerStopCh == nil { + return + } + go p.runBackgroundPingWorker() + go 
p.runBackgroundCleanupWorker() +} + +type openAIWSIdlePingCandidate struct { + accountID int64 + conn *openAIWSConn +} + +func (p *openAIWSConnPool) runBackgroundPingWorker() { + if p == nil { + return + } + ticker := time.NewTicker(openAIWSBackgroundPingInterval) + defer ticker.Stop() + for { + select { + case <-ticker.C: + p.runBackgroundPingSweep() + case <-p.workerStopCh: + return + } + } +} + +func (p *openAIWSConnPool) runBackgroundPingSweep() { + if p == nil { + return + } + candidates := p.snapshotIdleConnsForPing() + for _, item := range candidates { + if item.conn == nil || item.conn.isLeased() || item.conn.waiters.Load() > 0 { + continue + } + if err := item.conn.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { + p.evictConn(item.accountID, item.conn.id) + } + } +} + +func (p *openAIWSConnPool) snapshotIdleConnsForPing() []openAIWSIdlePingCandidate { + if p == nil { + return nil + } + candidates := make([]openAIWSIdlePingCandidate, 0) + p.accounts.Range(func(key, value any) bool { + accountID, ok := key.(int64) + if !ok || accountID <= 0 { + return true + } + ap, ok := value.(*openAIWSAccountPool) + if !ok || ap == nil { + return true + } + ap.mu.Lock() + for _, conn := range ap.conns { + if conn == nil || conn.isLeased() || conn.waiters.Load() > 0 { + continue + } + candidates = append(candidates, openAIWSIdlePingCandidate{ + accountID: accountID, + conn: conn, + }) + } + ap.mu.Unlock() + return true + }) + return candidates +} + +func (p *openAIWSConnPool) runBackgroundCleanupWorker() { + if p == nil { + return + } + ticker := time.NewTicker(openAIWSBackgroundSweepTicker) + defer ticker.Stop() + for { + select { + case <-ticker.C: + p.runBackgroundCleanupSweep(time.Now()) + case <-p.workerStopCh: + return + } + } +} + +func (p *openAIWSConnPool) runBackgroundCleanupSweep(now time.Time) { + if p == nil { + return + } + type cleanupResult struct { + evicted []*openAIWSConn + } + results := make([]cleanupResult, 0) + p.accounts.Range(func(_ any, 
value any) bool { + ap, ok := value.(*openAIWSAccountPool) + if !ok || ap == nil { + return true + } + maxConns := p.maxConnsHardCap() + ap.mu.Lock() + if ap.lastAcquire != nil && ap.lastAcquire.Account != nil { + maxConns = p.effectiveMaxConnsByAccount(ap.lastAcquire.Account) + } + evicted := p.cleanupAccountLocked(ap, now, maxConns) + ap.lastCleanupAt = now + ap.mu.Unlock() + if len(evicted) > 0 { + results = append(results, cleanupResult{evicted: evicted}) + } + return true + }) + for _, result := range results { + closeOpenAIWSConns(result.evicted) + } +} + func (p *openAIWSConnPool) Acquire(ctx context.Context, req openAIWSAcquireRequest) (*openAIWSConnLease, error) { if p != nil { p.metrics.acquireTotal.Add(1) diff --git a/backend/internal/service/openai_ws_pool_test.go b/backend/internal/service/openai_ws_pool_test.go index 0b6d517a1..5cac87706 100644 --- a/backend/internal/service/openai_ws_pool_test.go +++ b/backend/internal/service/openai_ws_pool_test.go @@ -42,6 +42,26 @@ func TestOpenAIWSConnPool_CleanupStaleAndTrimIdle(t *testing.T) { require.NotNil(t, ap.conns["idle_new"], "newer idle should be kept") } +func TestOpenAIWSConnLease_WriteJSONAndGuards(t *testing.T) { + conn := newOpenAIWSConn("lease_write", 1, &openAIWSFakeConn{}, nil) + lease := &openAIWSConnLease{conn: conn} + require.NoError(t, lease.WriteJSON(map[string]any{"type": "response.create"}, 0)) + + var nilLease *openAIWSConnLease + err := nilLease.WriteJSONWithContextTimeout(context.Background(), map[string]any{"type": "response.create"}, time.Second) + require.ErrorIs(t, err, errOpenAIWSConnClosed) + + err = (&openAIWSConnLease{}).WriteJSONWithContextTimeout(context.Background(), map[string]any{"type": "response.create"}, time.Second) + require.ErrorIs(t, err, errOpenAIWSConnClosed) +} + +func TestOpenAIWSConn_WriteJSONWithTimeout_NilParentContextUsesBackground(t *testing.T) { + probe := &openAIWSContextProbeConn{} + conn := newOpenAIWSConn("ctx_probe", 1, probe, nil) + 
require.NoError(t, conn.writeJSONWithTimeout(nil, map[string]any{"type": "response.create"}, 0)) + require.NotNil(t, probe.lastWriteCtx) +} + func TestOpenAIWSConnPool_TargetConnCountAdaptive(t *testing.T) { cfg := &config.Config{} cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 6 @@ -361,6 +381,232 @@ func TestOpenAIWSConnLease_ReadMessageWithContextTimeout_PerRead(t *testing.T) { require.ErrorIs(t, err, context.Canceled) } +func TestOpenAIWSConnLease_WriteJSONWithContextTimeout_RespectsParentContext(t *testing.T) { + conn := newOpenAIWSConn("write_timeout_ctx", 1, &openAIWSWriteBlockingConn{}, nil) + lease := &openAIWSConnLease{conn: conn} + + parentCtx, cancel := context.WithCancel(context.Background()) + go func() { + time.Sleep(20 * time.Millisecond) + cancel() + }() + + start := time.Now() + err := lease.WriteJSONWithContextTimeout(parentCtx, map[string]any{"type": "response.create"}, 2*time.Minute) + elapsed := time.Since(start) + + require.Error(t, err) + require.ErrorIs(t, err, context.Canceled) + require.Less(t, elapsed, 200*time.Millisecond) +} + +func TestOpenAIWSConn_ReadAndWriteCanProceedConcurrently(t *testing.T) { + conn := newOpenAIWSConn("full_duplex", 1, &openAIWSBlockingConn{readDelay: 120 * time.Millisecond}, nil) + + readDone := make(chan error, 1) + go func() { + _, err := conn.readMessageWithContextTimeout(context.Background(), 200*time.Millisecond) + readDone <- err + }() + + // 让读取先占用 readMu。 + time.Sleep(20 * time.Millisecond) + + start := time.Now() + err := conn.pingWithTimeout(50 * time.Millisecond) + elapsed := time.Since(start) + + require.NoError(t, err) + require.Less(t, elapsed, 80*time.Millisecond, "写路径不应被读锁长期阻塞") + require.NoError(t, <-readDone) +} + +func TestOpenAIWSConnPool_BackgroundPingSweep_EvictsDeadIdleConn(t *testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 2 + pool := newOpenAIWSConnPool(cfg) + + accountID := int64(301) + ap := pool.getOrCreateAccountPool(accountID) + conn := 
newOpenAIWSConn("dead_idle", accountID, &openAIWSPingFailConn{}, nil) + ap.mu.Lock() + ap.conns[conn.id] = conn + ap.mu.Unlock() + + pool.runBackgroundPingSweep() + + ap.mu.Lock() + _, exists := ap.conns[conn.id] + ap.mu.Unlock() + require.False(t, exists, "后台 ping 失败的空闲连接应被回收") +} + +func TestOpenAIWSConnPool_BackgroundCleanupSweep_WithoutAcquire(t *testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 2 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 2 + pool := newOpenAIWSConnPool(cfg) + + accountID := int64(302) + ap := pool.getOrCreateAccountPool(accountID) + stale := newOpenAIWSConn("stale_bg", accountID, &openAIWSFakeConn{}, nil) + stale.createdAtNano.Store(time.Now().Add(-2 * time.Hour).UnixNano()) + stale.lastUsedNano.Store(time.Now().Add(-2 * time.Hour).UnixNano()) + ap.mu.Lock() + ap.conns[stale.id] = stale + ap.mu.Unlock() + + pool.runBackgroundCleanupSweep(time.Now()) + + ap.mu.Lock() + _, exists := ap.conns[stale.id] + ap.mu.Unlock() + require.False(t, exists, "后台清理应在无新 acquire 时也回收过期连接") +} + +func TestOpenAIWSConnPool_BackgroundWorkerGuardBranches(t *testing.T) { + var nilPool *openAIWSConnPool + require.NotPanics(t, func() { + nilPool.startBackgroundWorkers() + nilPool.runBackgroundPingWorker() + nilPool.runBackgroundPingSweep() + _ = nilPool.snapshotIdleConnsForPing() + nilPool.runBackgroundCleanupWorker() + nilPool.runBackgroundCleanupSweep(time.Now()) + }) + + poolNoStop := &openAIWSConnPool{} + require.NotPanics(t, func() { + poolNoStop.startBackgroundWorkers() + }) + + poolStopPing := &openAIWSConnPool{workerStopCh: make(chan struct{})} + pingDone := make(chan struct{}) + go func() { + poolStopPing.runBackgroundPingWorker() + close(pingDone) + }() + close(poolStopPing.workerStopCh) + select { + case <-pingDone: + case <-time.After(500 * time.Millisecond): + t.Fatal("runBackgroundPingWorker 未在 stop 信号后退出") + } + + poolStopCleanup := &openAIWSConnPool{workerStopCh: make(chan struct{})} + cleanupDone := make(chan struct{}) 
+ go func() { + poolStopCleanup.runBackgroundCleanupWorker() + close(cleanupDone) + }() + close(poolStopCleanup.workerStopCh) + select { + case <-cleanupDone: + case <-time.After(500 * time.Millisecond): + t.Fatal("runBackgroundCleanupWorker 未在 stop 信号后退出") + } +} + +func TestOpenAIWSConnPool_SnapshotIdleConnsForPing_SkipsInvalidEntries(t *testing.T) { + pool := &openAIWSConnPool{} + pool.accounts.Store("invalid-key", &openAIWSAccountPool{}) + pool.accounts.Store(int64(123), "invalid-value") + + accountID := int64(123) + ap := &openAIWSAccountPool{ + conns: make(map[string]*openAIWSConn), + } + ap.conns["nil_conn"] = nil + + leased := newOpenAIWSConn("leased", accountID, &openAIWSFakeConn{}, nil) + require.True(t, leased.tryAcquire()) + ap.conns[leased.id] = leased + + waiting := newOpenAIWSConn("waiting", accountID, &openAIWSFakeConn{}, nil) + waiting.waiters.Store(1) + ap.conns[waiting.id] = waiting + + idle := newOpenAIWSConn("idle", accountID, &openAIWSFakeConn{}, nil) + ap.conns[idle.id] = idle + + pool.accounts.Store(accountID, ap) + candidates := pool.snapshotIdleConnsForPing() + require.Len(t, candidates, 1) + require.Equal(t, idle.id, candidates[0].conn.id) +} + +func TestOpenAIWSConnPool_RunBackgroundCleanupSweep_SkipsInvalidAndUsesAccountCap(t *testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 4 + cfg.Gateway.OpenAIWS.DynamicMaxConnsByAccountConcurrencyEnabled = true + + pool := &openAIWSConnPool{cfg: cfg} + pool.accounts.Store("bad-key", "bad-value") + + accountID := int64(2026) + ap := &openAIWSAccountPool{ + conns: make(map[string]*openAIWSConn), + } + stale := newOpenAIWSConn("stale_bg_cleanup", accountID, &openAIWSFakeConn{}, nil) + stale.createdAtNano.Store(time.Now().Add(-2 * time.Hour).UnixNano()) + stale.lastUsedNano.Store(time.Now().Add(-2 * time.Hour).UnixNano()) + ap.conns[stale.id] = stale + ap.lastAcquire = &openAIWSAcquireRequest{ + Account: &Account{ + ID: accountID, + Platform: PlatformOpenAI, + Type: 
AccountTypeAPIKey, + Concurrency: 1, + }, + } + pool.accounts.Store(accountID, ap) + + now := time.Now() + pool.runBackgroundCleanupSweep(now) + + ap.mu.Lock() + _, exists := ap.conns[stale.id] + lastCleanupAt := ap.lastCleanupAt + ap.mu.Unlock() + + require.False(t, exists, "后台清理应清理过期连接") + require.Equal(t, now, lastCleanupAt) +} + +func TestOpenAIWSConnPool_QueueLimitPerConn_DefaultAndConfigured(t *testing.T) { + var nilPool *openAIWSConnPool + require.Equal(t, 256, nilPool.queueLimitPerConn()) + + pool := &openAIWSConnPool{cfg: &config.Config{}} + require.Equal(t, 256, pool.queueLimitPerConn()) + + pool.cfg.Gateway.OpenAIWS.QueueLimitPerConn = 9 + require.Equal(t, 9, pool.queueLimitPerConn()) +} + +func TestOpenAIWSConnPool_Close(t *testing.T) { + cfg := &config.Config{} + pool := newOpenAIWSConnPool(cfg) + + // Close 应该可以安全调用 + pool.Close() + + // workerStopCh 应已关闭 + select { + case <-pool.workerStopCh: + // 预期:channel 已关闭 + default: + t.Fatal("Close 后 workerStopCh 应已关闭") + } + + // 多次调用 Close 不应 panic + pool.Close() + + // nil pool 调用 Close 不应 panic + var nilPool *openAIWSConnPool + nilPool.Close() +} + type openAIWSFakeDialer struct{} func (d *openAIWSFakeDialer) Dial( @@ -505,6 +751,64 @@ func (c *openAIWSBlockingConn) Close() error { return nil } +type openAIWSWriteBlockingConn struct{} + +func (c *openAIWSWriteBlockingConn) WriteJSON(ctx context.Context, _ any) error { + <-ctx.Done() + return ctx.Err() +} + +func (c *openAIWSWriteBlockingConn) ReadMessage(context.Context) ([]byte, error) { + return []byte(`{"type":"response.completed","response":{"id":"resp_write_block"}}`), nil +} + +func (c *openAIWSWriteBlockingConn) Ping(context.Context) error { + return nil +} + +func (c *openAIWSWriteBlockingConn) Close() error { + return nil +} + +type openAIWSPingFailConn struct{} + +func (c *openAIWSPingFailConn) WriteJSON(context.Context, any) error { + return nil +} + +func (c *openAIWSPingFailConn) ReadMessage(context.Context) ([]byte, error) { + return 
[]byte(`{"type":"response.completed","response":{"id":"resp_ping_fail"}}`), nil +} + +func (c *openAIWSPingFailConn) Ping(context.Context) error { + return errors.New("ping failed") +} + +func (c *openAIWSPingFailConn) Close() error { + return nil +} + +type openAIWSContextProbeConn struct { + lastWriteCtx context.Context +} + +func (c *openAIWSContextProbeConn) WriteJSON(ctx context.Context, _ any) error { + c.lastWriteCtx = ctx + return nil +} + +func (c *openAIWSContextProbeConn) ReadMessage(context.Context) ([]byte, error) { + return []byte(`{"type":"response.completed","response":{"id":"resp_ctx_probe"}}`), nil +} + +func (c *openAIWSContextProbeConn) Ping(context.Context) error { + return nil +} + +func (c *openAIWSContextProbeConn) Close() error { + return nil +} + type openAIWSNilConnDialer struct{} func (d *openAIWSNilConnDialer) Dial( diff --git a/backend/internal/service/openai_ws_protocol_resolver.go b/backend/internal/service/openai_ws_protocol_resolver.go index 915adc750..67724a964 100644 --- a/backend/internal/service/openai_ws_protocol_resolver.go +++ b/backend/internal/service/openai_ws_protocol_resolver.go @@ -56,11 +56,16 @@ func (r *defaultOpenAIWSProtocolResolver) Resolve(account *Account) OpenAIWSProt if !wsCfg.Enabled { return openAIWSHTTPDecision("global_disabled") } - if account.IsOpenAIOAuth() && !wsCfg.OAuthEnabled { - return openAIWSHTTPDecision("oauth_disabled") - } - if account.IsOpenAIApiKey() && !wsCfg.APIKeyEnabled { - return openAIWSHTTPDecision("apikey_disabled") + if account.IsOpenAIOAuth() { + if !wsCfg.OAuthEnabled { + return openAIWSHTTPDecision("oauth_disabled") + } + } else if account.IsOpenAIApiKey() { + if !wsCfg.APIKeyEnabled { + return openAIWSHTTPDecision("apikey_disabled") + } + } else { + return openAIWSHTTPDecision("unknown_auth_type") } if !account.IsOpenAIResponsesWebSocketV2Enabled() { return openAIWSHTTPDecision("account_disabled") diff --git a/backend/internal/service/openai_ws_protocol_resolver_test.go 
b/backend/internal/service/openai_ws_protocol_resolver_test.go index 7b252f60c..7a2dcda9a 100644 --- a/backend/internal/service/openai_ws_protocol_resolver_test.go +++ b/backend/internal/service/openai_ws_protocol_resolver_test.go @@ -106,4 +106,32 @@ func TestOpenAIWSProtocolResolver_Resolve(t *testing.T) { require.Equal(t, OpenAIUpstreamTransportHTTPSSE, decision.Transport) require.Equal(t, "oauth_disabled", decision.Reason) }) + + t.Run("API Key 账号关闭开关时回退HTTP", func(t *testing.T) { + cfg := *baseCfg + cfg.Gateway.OpenAIWS.APIKeyEnabled = false + account := &Account{ + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Extra: map[string]any{ + "openai_apikey_responses_websockets_v2_enabled": true, + }, + } + decision := NewOpenAIWSProtocolResolver(&cfg).Resolve(account) + require.Equal(t, OpenAIUpstreamTransportHTTPSSE, decision.Transport) + require.Equal(t, "apikey_disabled", decision.Reason) + }) + + t.Run("未知认证类型回退HTTP", func(t *testing.T) { + account := &Account{ + Platform: PlatformOpenAI, + Type: "unknown_type", + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + decision := NewOpenAIWSProtocolResolver(baseCfg).Resolve(account) + require.Equal(t, OpenAIUpstreamTransportHTTPSSE, decision.Transport) + require.Equal(t, "unknown_auth_type", decision.Reason) + }) } diff --git a/backend/internal/service/openai_ws_state_store.go b/backend/internal/service/openai_ws_state_store.go index e15ac5100..b606baa1a 100644 --- a/backend/internal/service/openai_ws_state_store.go +++ b/backend/internal/service/openai_ws_state_store.go @@ -16,6 +16,7 @@ const ( openAIWSStateStoreCleanupInterval = time.Minute openAIWSStateStoreCleanupMaxPerMap = 512 openAIWSStateStoreMaxEntriesPerMap = 65536 + openAIWSStateStoreRedisTimeout = 3 * time.Second ) type openAIWSAccountBinding struct { @@ -108,7 +109,9 @@ func (s *defaultOpenAIWSStateStore) BindResponseAccount(ctx context.Context, gro return nil } cacheKey := openAIWSResponseAccountCacheKey(id) - return 
s.cache.SetSessionAccountID(ctx, groupID, cacheKey, accountID, ttl) + cacheCtx, cancel := withOpenAIWSStateStoreRedisTimeout(ctx) + defer cancel() + return s.cache.SetSessionAccountID(cacheCtx, groupID, cacheKey, accountID, ttl) } func (s *defaultOpenAIWSStateStore) GetResponseAccount(ctx context.Context, groupID int64, responseID string) (int64, error) { @@ -134,7 +137,9 @@ func (s *defaultOpenAIWSStateStore) GetResponseAccount(ctx context.Context, grou } cacheKey := openAIWSResponseAccountCacheKey(id) - accountID, err := s.cache.GetSessionAccountID(ctx, groupID, cacheKey) + cacheCtx, cancel := withOpenAIWSStateStoreRedisTimeout(ctx) + defer cancel() + accountID, err := s.cache.GetSessionAccountID(cacheCtx, groupID, cacheKey) if err != nil || accountID <= 0 { // 缓存读取失败不阻断主流程,按未命中降级。 return 0, nil @@ -154,7 +159,9 @@ func (s *defaultOpenAIWSStateStore) DeleteResponseAccount(ctx context.Context, g if s.cache == nil { return nil } - return s.cache.DeleteSessionAccountID(ctx, groupID, openAIWSResponseAccountCacheKey(id)) + cacheCtx, cancel := withOpenAIWSStateStoreRedisTimeout(ctx) + defer cancel() + return s.cache.DeleteSessionAccountID(cacheCtx, groupID, openAIWSResponseAccountCacheKey(id)) } func (s *defaultOpenAIWSStateStore) BindResponseConn(responseID, connID string, ttl time.Duration) { @@ -424,3 +431,10 @@ func openAIWSSessionTurnStateKey(groupID int64, sessionHash string) string { } return fmt.Sprintf("%d:%s", groupID, hash) } + +func withOpenAIWSStateStoreRedisTimeout(ctx context.Context) (context.Context, context.CancelFunc) { + if ctx == nil { + ctx = context.Background() + } + return context.WithTimeout(ctx, openAIWSStateStoreRedisTimeout) +} diff --git a/backend/internal/service/openai_ws_state_store_test.go b/backend/internal/service/openai_ws_state_store_test.go index 17c54cac2..51cb4e99a 100644 --- a/backend/internal/service/openai_ws_state_store_test.go +++ b/backend/internal/service/openai_ws_state_store_test.go @@ -2,6 +2,7 @@ package service 
import ( "context" + "errors" "fmt" "testing" "time" @@ -154,3 +155,81 @@ func TestEnsureBindingCapacity_DoesNotEvictWhenUpdatingExistingKey(t *testing.T) require.Len(t, bindings, 2) require.Equal(t, 9, bindings["a"]) } + +type openAIWSStateStoreTimeoutProbeCache struct { + setHasDeadline bool + getHasDeadline bool + deleteHasDeadline bool + setDeadlineDelta time.Duration + getDeadlineDelta time.Duration + delDeadlineDelta time.Duration +} + +func (c *openAIWSStateStoreTimeoutProbeCache) GetSessionAccountID(ctx context.Context, _ int64, _ string) (int64, error) { + if deadline, ok := ctx.Deadline(); ok { + c.getHasDeadline = true + c.getDeadlineDelta = time.Until(deadline) + } + return 123, nil +} + +func (c *openAIWSStateStoreTimeoutProbeCache) SetSessionAccountID(ctx context.Context, _ int64, _ string, _ int64, _ time.Duration) error { + if deadline, ok := ctx.Deadline(); ok { + c.setHasDeadline = true + c.setDeadlineDelta = time.Until(deadline) + } + return errors.New("set failed") +} + +func (c *openAIWSStateStoreTimeoutProbeCache) RefreshSessionTTL(context.Context, int64, string, time.Duration) error { + return nil +} + +func (c *openAIWSStateStoreTimeoutProbeCache) DeleteSessionAccountID(ctx context.Context, _ int64, _ string) error { + if deadline, ok := ctx.Deadline(); ok { + c.deleteHasDeadline = true + c.delDeadlineDelta = time.Until(deadline) + } + return nil +} + +func TestOpenAIWSStateStore_RedisOpsUseShortTimeout(t *testing.T) { + probe := &openAIWSStateStoreTimeoutProbeCache{} + store := NewOpenAIWSStateStore(probe) + ctx := context.Background() + groupID := int64(5) + + err := store.BindResponseAccount(ctx, groupID, "resp_timeout_probe", 11, time.Minute) + require.Error(t, err) + + accountID, getErr := store.GetResponseAccount(ctx, groupID, "resp_timeout_probe") + require.NoError(t, getErr) + require.Equal(t, int64(11), accountID, "本地缓存命中应优先返回已绑定账号") + + require.NoError(t, store.DeleteResponseAccount(ctx, groupID, "resp_timeout_probe")) + + 
require.True(t, probe.setHasDeadline, "SetSessionAccountID 应携带独立超时上下文") + require.True(t, probe.deleteHasDeadline, "DeleteSessionAccountID 应携带独立超时上下文") + require.False(t, probe.getHasDeadline, "GetSessionAccountID 本用例应由本地缓存命中,不触发 Redis 读取") + require.Greater(t, probe.setDeadlineDelta, 2*time.Second) + require.LessOrEqual(t, probe.setDeadlineDelta, 3*time.Second) + require.Greater(t, probe.delDeadlineDelta, 2*time.Second) + require.LessOrEqual(t, probe.delDeadlineDelta, 3*time.Second) + + probe2 := &openAIWSStateStoreTimeoutProbeCache{} + store2 := NewOpenAIWSStateStore(probe2) + accountID2, err2 := store2.GetResponseAccount(ctx, groupID, "resp_cache_only") + require.NoError(t, err2) + require.Equal(t, int64(123), accountID2) + require.True(t, probe2.getHasDeadline, "GetSessionAccountID 在缓存未命中时应携带独立超时上下文") + require.Greater(t, probe2.getDeadlineDelta, 2*time.Second) + require.LessOrEqual(t, probe2.getDeadlineDelta, 3*time.Second) +} + +func TestWithOpenAIWSStateStoreRedisTimeout_NilContext(t *testing.T) { + ctx, cancel := withOpenAIWSStateStoreRedisTimeout(nil) + defer cancel() + require.NotNil(t, ctx) + _, ok := ctx.Deadline() + require.True(t, ok, "nil 上下文应回退到 background 并附加短超时") +} From 1e8f9570701bd25d9fa70643d291fea3e10d6f7e Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 17:22:29 +0800 Subject: [PATCH 022/120] =?UTF-8?q?fix(openai-ws):=20=E4=BF=AE=E5=A4=8D=20?= =?UTF-8?q?ingress=20ws=20=E9=87=8D=E8=AF=95=E9=92=A9=E5=AD=90=E4=B8=8E?= =?UTF-8?q?=E9=A2=84=E6=A3=80=20ping=20=E5=9B=9E=E5=BD=92?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../internal/service/openai_ws_forwarder.go | 157 ++++++- ...penai_ws_forwarder_ingress_session_test.go | 432 ++++++++++++++++++ backend/internal/service/openai_ws_pool.go | 7 + .../internal/service/openai_ws_pool_test.go | 10 + 4 files changed, 598 insertions(+), 8 deletions(-) diff --git a/backend/internal/service/openai_ws_forwarder.go 
b/backend/internal/service/openai_ws_forwarder.go index 0b017552c..1d779f7f0 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -64,6 +64,8 @@ var openAIWSLogValueReplacer = strings.NewReplacer( "failed", "fail", ) +var openAIWSIngressPreflightPingIdle = 20 * time.Second + // openAIWSFallbackError 表示可安全回退到 HTTP 的 WS 错误(尚未写下游)。 type openAIWSFallbackError struct { Reason string @@ -98,6 +100,70 @@ type OpenAIWSClientCloseError struct { err error } +type openAIWSIngressTurnError struct { + stage string + cause error + wroteDownstream bool +} + +func (e *openAIWSIngressTurnError) Error() string { + if e == nil { + return "" + } + if e.cause == nil { + return strings.TrimSpace(e.stage) + } + return e.cause.Error() +} + +func (e *openAIWSIngressTurnError) Unwrap() error { + if e == nil { + return nil + } + return e.cause +} + +func wrapOpenAIWSIngressTurnError(stage string, cause error, wroteDownstream bool) error { + if cause == nil { + return nil + } + return &openAIWSIngressTurnError{ + stage: strings.TrimSpace(stage), + cause: cause, + wroteDownstream: wroteDownstream, + } +} + +func isOpenAIWSIngressTurnRetryable(err error) bool { + var turnErr *openAIWSIngressTurnError + if !errors.As(err, &turnErr) || turnErr == nil { + return false + } + if errors.Is(turnErr.cause, context.Canceled) || errors.Is(turnErr.cause, context.DeadlineExceeded) { + return false + } + if turnErr.wroteDownstream { + return false + } + switch turnErr.stage { + case "write_upstream", "read_upstream": + return true + default: + return false + } +} + +func openAIWSIngressTurnRetryReason(err error) string { + var turnErr *openAIWSIngressTurnError + if !errors.As(err, &turnErr) || turnErr == nil { + return "unknown" + } + if turnErr.stage == "" { + return "unknown" + } + return turnErr.stage +} + // NewOpenAIWSClientCloseError 创建一个客户端 WS 关闭错误。 func NewOpenAIWSClientCloseError(statusCode coderws.StatusCode, reason string, err 
error) error { return &OpenAIWSClientCloseError{ @@ -1981,8 +2047,13 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return nil, errors.New("upstream websocket lease is nil") } turnStart := time.Now() + wroteDownstream := false if err := lease.WriteJSONWithContextTimeout(ctx, payload, s.openAIWSWriteTimeout()); err != nil { - return nil, fmt.Errorf("write upstream websocket request: %w", err) + return nil, wrapOpenAIWSIngressTurnError( + "write_upstream", + fmt.Errorf("write upstream websocket request: %w", err), + false, + ) } logOpenAIWSModeInfo( "ingress_ws_turn_request_sent account_id=%d turn=%d conn_id=%s payload_bytes=%d", @@ -2018,7 +2089,11 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( upstreamMessage, readErr := lease.ReadMessageWithContextTimeout(ctx, s.openAIWSReadTimeout()) if readErr != nil { lease.MarkBroken() - return nil, fmt.Errorf("read upstream websocket event: %w", readErr) + return nil, wrapOpenAIWSIngressTurnError( + "read_upstream", + fmt.Errorf("read upstream websocket event: %w", readErr), + wroteDownstream, + ) } if responseID == "" { @@ -2070,8 +2145,14 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( truncateOpenAIWSLogValue(closeReason, openAIWSHeaderValueMaxLen), ) } else { - return nil, fmt.Errorf("write client websocket event: %w", err) + return nil, wrapOpenAIWSIngressTurnError( + "write_client", + fmt.Errorf("write client websocket event: %w", err), + wroteDownstream, + ) } + } else { + wroteDownstream = true } } if isTerminalEvent { @@ -2133,12 +2214,16 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( defer releaseSessionLease() turn := 1 + turnRetry := 0 + lastTurnFinishedAt := time.Time{} + skipBeforeTurn := false for { - if hooks != nil && hooks.BeforeTurn != nil { + if !skipBeforeTurn && hooks != nil && hooks.BeforeTurn != nil { if err := hooks.BeforeTurn(turn); err != nil { return err } } + skipBeforeTurn = false if sessionLease == nil { 
acquiredLease, acquireErr := acquireTurnLease(turn, preferredConnID) if acquireErr != nil { @@ -2147,15 +2232,71 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( sessionLease = acquiredLease sessionConnID = strings.TrimSpace(sessionLease.ConnID()) } + shouldPreflightPing := turn > 1 && sessionLease != nil && turnRetry == 0 + if shouldPreflightPing && openAIWSIngressPreflightPingIdle > 0 && !lastTurnFinishedAt.IsZero() { + if time.Since(lastTurnFinishedAt) < openAIWSIngressPreflightPingIdle { + shouldPreflightPing = false + } + } + if shouldPreflightPing { + if pingErr := sessionLease.PingWithTimeout(openAIWSConnHealthCheckTO); pingErr != nil { + logOpenAIWSModeInfo( + "ingress_ws_upstream_preflight_ping_fail account_id=%d turn=%d conn_id=%s cause=%s", + account.ID, + turn, + truncateOpenAIWSLogValue(sessionConnID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(pingErr.Error(), openAIWSLogValueMaxLen), + ) + sessionLease.MarkBroken() + releaseSessionLease() + sessionLease = nil + sessionConnID = "" + preferredConnID = "" + + acquiredLease, acquireErr := acquireTurnLease(turn, preferredConnID) + if acquireErr != nil { + return fmt.Errorf("acquire upstream websocket after preflight ping fail: %w", acquireErr) + } + sessionLease = acquiredLease + sessionConnID = strings.TrimSpace(sessionLease.ConnID()) + } + } connID := sessionConnID result, relayErr := sendAndRelay(turn, sessionLease, currentPayload, currentPayloadBytes, currentOriginalModel) - if hooks != nil && hooks.AfterTurn != nil { - hooks.AfterTurn(turn, result, relayErr) - } if relayErr != nil { + if isOpenAIWSIngressTurnRetryable(relayErr) && turnRetry < 1 { + turnRetry++ + logOpenAIWSModeInfo( + "ingress_ws_turn_retry account_id=%d turn=%d retry=%d reason=%s conn_id=%s", + account.ID, + turn, + turnRetry, + truncateOpenAIWSLogValue(openAIWSIngressTurnRetryReason(relayErr), openAIWSLogValueMaxLen), + truncateOpenAIWSLogValue(connID, openAIWSIDValueMaxLen), + ) + 
sessionLease.MarkBroken() + releaseSessionLease() + sessionLease = nil + sessionConnID = "" + preferredConnID = "" + skipBeforeTurn = true + continue + } + finalErr := relayErr + if unwrapped := errors.Unwrap(relayErr); unwrapped != nil { + finalErr = unwrapped + } + if hooks != nil && hooks.AfterTurn != nil { + hooks.AfterTurn(turn, nil, finalErr) + } sessionLease.MarkBroken() - return relayErr + return finalErr + } + turnRetry = 0 + lastTurnFinishedAt = time.Now() + if hooks != nil && hooks.AfterTurn != nil { + hooks.AfterTurn(turn, result, nil) } if result == nil { return errors.New("websocket turn result is nil") diff --git a/backend/internal/service/openai_ws_forwarder_ingress_session_test.go b/backend/internal/service/openai_ws_forwarder_ingress_session_test.go index a2d456bb0..ab690da81 100644 --- a/backend/internal/service/openai_ws_forwarder_ingress_session_test.go +++ b/backend/internal/service/openai_ws_forwarder_ingress_session_test.go @@ -3,9 +3,11 @@ package service import ( "context" "errors" + "io" "net/http" "net/http/httptest" "strings" + "sync" "testing" "time" @@ -162,6 +164,436 @@ func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_KeepLeaseAcrossT require.Len(t, captureConn.writes, 2, "应向同一上游连接发送两轮 response.create") } +func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_PreflightPingFailReconnectsBeforeTurn(t *testing.T) { + gin.SetMode(gin.TestMode) + prevPreflightPingIdle := openAIWSIngressPreflightPingIdle + openAIWSIngressPreflightPingIdle = 0 + defer func() { + openAIWSIngressPreflightPingIdle = prevPreflightPingIdle + }() + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + 
cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + cfg.Gateway.OpenAIWS.QueueLimitPerConn = 8 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.ReadTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.WriteTimeoutSeconds = 3 + + firstConn := &openAIWSPreflightFailConn{ + events: [][]byte{ + []byte(`{"type":"response.completed","response":{"id":"resp_turn_ping_1","model":"gpt-5.1","usage":{"input_tokens":1,"output_tokens":1}}}`), + }, + } + secondConn := &openAIWSCaptureConn{ + events: [][]byte{ + []byte(`{"type":"response.completed","response":{"id":"resp_turn_ping_2","model":"gpt-5.1","usage":{"input_tokens":1,"output_tokens":1}}}`), + }, + } + dialer := &openAIWSQueueDialer{ + conns: []openAIWSClientConn{firstConn, secondConn}, + } + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(dialer) + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + openaiWSPool: pool, + } + + account := &Account{ + ID: 116, + Name: "openai-ingress-preflight-ping", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, + Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + + serverErrCh := make(chan error, 1) + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, err := coderws.Accept(w, r, &coderws.AcceptOptions{ + CompressionMode: coderws.CompressionContextTakeover, + }) + if err != nil { + serverErrCh <- err + return + } + defer func() { + _ = conn.CloseNow() + }() + + rec := httptest.NewRecorder() + ginCtx, _ := gin.CreateTestContext(rec) + req := r.Clone(r.Context()) + req.Header = req.Header.Clone() + req.Header.Set("User-Agent", "unit-test-agent/1.0") + ginCtx.Request = req + + readCtx, cancel := 
context.WithTimeout(r.Context(), 3*time.Second) + msgType, firstMessage, readErr := conn.Read(readCtx) + cancel() + if readErr != nil { + serverErrCh <- readErr + return + } + if msgType != coderws.MessageText && msgType != coderws.MessageBinary { + serverErrCh <- errors.New("unsupported websocket client message type") + return + } + + serverErrCh <- svc.ProxyResponsesWebSocketFromClient(r.Context(), ginCtx, conn, account, "sk-test", firstMessage, nil) + })) + defer wsServer.Close() + + dialCtx, cancelDial := context.WithTimeout(context.Background(), 3*time.Second) + clientConn, _, err := coderws.Dial(dialCtx, "ws"+strings.TrimPrefix(wsServer.URL, "http"), nil) + cancelDial() + require.NoError(t, err) + defer func() { + _ = clientConn.CloseNow() + }() + + writeMessage := func(payload string) { + writeCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + require.NoError(t, clientConn.Write(writeCtx, coderws.MessageText, []byte(payload))) + } + readMessage := func() []byte { + readCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + msgType, message, readErr := clientConn.Read(readCtx) + require.NoError(t, readErr) + require.Equal(t, coderws.MessageText, msgType) + return message + } + + writeMessage(`{"type":"response.create","model":"gpt-5.1","stream":false}`) + firstTurn := readMessage() + require.Equal(t, "resp_turn_ping_1", gjson.GetBytes(firstTurn, "response.id").String()) + + writeMessage(`{"type":"response.create","model":"gpt-5.1","stream":false,"previous_response_id":"resp_turn_ping_1"}`) + secondTurn := readMessage() + require.Equal(t, "resp_turn_ping_2", gjson.GetBytes(secondTurn, "response.id").String()) + + require.NoError(t, clientConn.Close(coderws.StatusNormalClosure, "done")) + select { + case serverErr := <-serverErrCh: + require.NoError(t, serverErr) + case <-time.After(5 * time.Second): + t.Fatal("等待 ingress websocket 结束超时") + } + require.Equal(t, 2, dialer.DialCount(), 
"第二轮 turn 前 ping 失败应触发换连") + require.Equal(t, 1, firstConn.WriteCount(), "preflight ping 失败后不应继续向旧连接发送第二轮 turn") + require.GreaterOrEqual(t, firstConn.PingCount(), 1, "第二轮前应对旧连接执行 preflight ping") +} + +func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_WriteFailBeforeDownstreamRetriesOnce(t *testing.T) { + gin.SetMode(gin.TestMode) + + cfg := &config.Config{} + cfg.Security.URLAllowlist.Enabled = false + cfg.Security.URLAllowlist.AllowInsecureHTTP = true + cfg.Gateway.OpenAIWS.Enabled = true + cfg.Gateway.OpenAIWS.OAuthEnabled = true + cfg.Gateway.OpenAIWS.APIKeyEnabled = true + cfg.Gateway.OpenAIWS.ResponsesWebsocketsV2 = true + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.MaxIdlePerAccount = 1 + cfg.Gateway.OpenAIWS.QueueLimitPerConn = 8 + cfg.Gateway.OpenAIWS.DialTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.ReadTimeoutSeconds = 3 + cfg.Gateway.OpenAIWS.WriteTimeoutSeconds = 3 + + firstConn := &openAIWSWriteFailAfterFirstTurnConn{ + events: [][]byte{ + []byte(`{"type":"response.completed","response":{"id":"resp_turn_write_retry_1","model":"gpt-5.1","usage":{"input_tokens":1,"output_tokens":1}}}`), + }, + } + secondConn := &openAIWSCaptureConn{ + events: [][]byte{ + []byte(`{"type":"response.completed","response":{"id":"resp_turn_write_retry_2","model":"gpt-5.1","usage":{"input_tokens":1,"output_tokens":1}}}`), + }, + } + dialer := &openAIWSQueueDialer{ + conns: []openAIWSClientConn{firstConn, secondConn}, + } + pool := newOpenAIWSConnPool(cfg) + pool.setClientDialerForTest(dialer) + + svc := &OpenAIGatewayService{ + cfg: cfg, + httpUpstream: &httpUpstreamRecorder{}, + cache: &stubGatewayCache{}, + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + toolCorrector: NewCodexToolCorrector(), + openaiWSPool: pool, + } + + account := &Account{ + ID: 117, + Name: "openai-ingress-write-retry", + Platform: PlatformOpenAI, + Type: AccountTypeAPIKey, + Status: StatusActive, + Schedulable: true, 
+ Concurrency: 1, + Credentials: map[string]any{ + "api_key": "sk-test", + }, + Extra: map[string]any{ + "responses_websockets_v2_enabled": true, + }, + } + var hooksMu sync.Mutex + beforeTurnCalls := make(map[int]int) + afterTurnCalls := make(map[int]int) + hooks := &OpenAIWSIngressHooks{ + BeforeTurn: func(turn int) error { + hooksMu.Lock() + beforeTurnCalls[turn]++ + hooksMu.Unlock() + return nil + }, + AfterTurn: func(turn int, _ *OpenAIForwardResult, _ error) { + hooksMu.Lock() + afterTurnCalls[turn]++ + hooksMu.Unlock() + }, + } + + serverErrCh := make(chan error, 1) + wsServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + conn, err := coderws.Accept(w, r, &coderws.AcceptOptions{ + CompressionMode: coderws.CompressionContextTakeover, + }) + if err != nil { + serverErrCh <- err + return + } + defer func() { + _ = conn.CloseNow() + }() + + rec := httptest.NewRecorder() + ginCtx, _ := gin.CreateTestContext(rec) + req := r.Clone(r.Context()) + req.Header = req.Header.Clone() + req.Header.Set("User-Agent", "unit-test-agent/1.0") + ginCtx.Request = req + + readCtx, cancel := context.WithTimeout(r.Context(), 3*time.Second) + msgType, firstMessage, readErr := conn.Read(readCtx) + cancel() + if readErr != nil { + serverErrCh <- readErr + return + } + if msgType != coderws.MessageText && msgType != coderws.MessageBinary { + serverErrCh <- errors.New("unsupported websocket client message type") + return + } + + serverErrCh <- svc.ProxyResponsesWebSocketFromClient(r.Context(), ginCtx, conn, account, "sk-test", firstMessage, hooks) + })) + defer wsServer.Close() + + dialCtx, cancelDial := context.WithTimeout(context.Background(), 3*time.Second) + clientConn, _, err := coderws.Dial(dialCtx, "ws"+strings.TrimPrefix(wsServer.URL, "http"), nil) + cancelDial() + require.NoError(t, err) + defer func() { + _ = clientConn.CloseNow() + }() + + writeMessage := func(payload string) { + writeCtx, cancel := 
context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + require.NoError(t, clientConn.Write(writeCtx, coderws.MessageText, []byte(payload))) + } + readMessage := func() []byte { + readCtx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + defer cancel() + msgType, message, readErr := clientConn.Read(readCtx) + require.NoError(t, readErr) + require.Equal(t, coderws.MessageText, msgType) + return message + } + + writeMessage(`{"type":"response.create","model":"gpt-5.1","stream":false}`) + firstTurn := readMessage() + require.Equal(t, "resp_turn_write_retry_1", gjson.GetBytes(firstTurn, "response.id").String()) + + writeMessage(`{"type":"response.create","model":"gpt-5.1","stream":false,"previous_response_id":"resp_turn_write_retry_1"}`) + secondTurn := readMessage() + require.Equal(t, "resp_turn_write_retry_2", gjson.GetBytes(secondTurn, "response.id").String()) + + require.NoError(t, clientConn.Close(coderws.StatusNormalClosure, "done")) + select { + case serverErr := <-serverErrCh: + require.NoError(t, serverErr) + case <-time.After(5 * time.Second): + t.Fatal("等待 ingress websocket 结束超时") + } + require.Equal(t, 2, dialer.DialCount(), "第二轮 turn 上游写失败且未写下游时应自动重试并换连") + hooksMu.Lock() + beforeTurn1 := beforeTurnCalls[1] + beforeTurn2 := beforeTurnCalls[2] + afterTurn1 := afterTurnCalls[1] + afterTurn2 := afterTurnCalls[2] + hooksMu.Unlock() + require.Equal(t, 1, beforeTurn1, "首轮 turn BeforeTurn 应执行一次") + require.Equal(t, 1, beforeTurn2, "同一 turn 重试不应重复触发 BeforeTurn") + require.Equal(t, 1, afterTurn1, "首轮 turn AfterTurn 应执行一次") + require.Equal(t, 1, afterTurn2, "第二轮 turn AfterTurn 应执行一次") +} + +type openAIWSQueueDialer struct { + mu sync.Mutex + conns []openAIWSClientConn + dialCount int +} + +func (d *openAIWSQueueDialer) Dial( + ctx context.Context, + wsURL string, + headers http.Header, + proxyURL string, +) (openAIWSClientConn, int, http.Header, error) { + _ = ctx + _ = wsURL + _ = headers + _ = proxyURL + d.mu.Lock() + defer 
d.mu.Unlock() + d.dialCount++ + if len(d.conns) == 0 { + return nil, 503, nil, errors.New("no test conn") + } + conn := d.conns[0] + if len(d.conns) > 1 { + d.conns = d.conns[1:] + } + return conn, 0, nil, nil +} + +func (d *openAIWSQueueDialer) DialCount() int { + d.mu.Lock() + defer d.mu.Unlock() + return d.dialCount +} + +type openAIWSPreflightFailConn struct { + mu sync.Mutex + events [][]byte + pingFails bool + writeCount int + pingCount int +} + +func (c *openAIWSPreflightFailConn) WriteJSON(context.Context, any) error { + c.mu.Lock() + c.writeCount++ + c.mu.Unlock() + return nil +} + +func (c *openAIWSPreflightFailConn) ReadMessage(context.Context) ([]byte, error) { + c.mu.Lock() + defer c.mu.Unlock() + if len(c.events) == 0 { + return nil, io.EOF + } + event := c.events[0] + c.events = c.events[1:] + if len(c.events) == 0 { + c.pingFails = true + } + return event, nil +} + +func (c *openAIWSPreflightFailConn) Ping(context.Context) error { + c.mu.Lock() + defer c.mu.Unlock() + c.pingCount++ + if c.pingFails { + return errors.New("preflight ping failed") + } + return nil +} + +func (c *openAIWSPreflightFailConn) Close() error { + return nil +} + +func (c *openAIWSPreflightFailConn) WriteCount() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.writeCount +} + +func (c *openAIWSPreflightFailConn) PingCount() int { + c.mu.Lock() + defer c.mu.Unlock() + return c.pingCount +} + +type openAIWSWriteFailAfterFirstTurnConn struct { + mu sync.Mutex + events [][]byte + failOnWrite bool +} + +func (c *openAIWSWriteFailAfterFirstTurnConn) WriteJSON(context.Context, any) error { + c.mu.Lock() + defer c.mu.Unlock() + if c.failOnWrite { + return errors.New("write failed on stale conn") + } + return nil +} + +func (c *openAIWSWriteFailAfterFirstTurnConn) ReadMessage(context.Context) ([]byte, error) { + c.mu.Lock() + defer c.mu.Unlock() + if len(c.events) == 0 { + return nil, io.EOF + } + event := c.events[0] + c.events = c.events[1:] + if len(c.events) == 0 { + 
c.failOnWrite = true + } + return event, nil +} + +func (c *openAIWSWriteFailAfterFirstTurnConn) Ping(context.Context) error { + return nil +} + +func (c *openAIWSWriteFailAfterFirstTurnConn) Close() error { + return nil +} + func TestOpenAIGatewayService_ProxyResponsesWebSocketFromClient_ClientDisconnectStillDrainsUpstream(t *testing.T) { gin.SetMode(gin.TestMode) diff --git a/backend/internal/service/openai_ws_pool.go b/backend/internal/service/openai_ws_pool.go index ee8f1256a..50e114114 100644 --- a/backend/internal/service/openai_ws_pool.go +++ b/backend/internal/service/openai_ws_pool.go @@ -167,6 +167,13 @@ func (l *openAIWSConnLease) ReadMessageWithContextTimeout(ctx context.Context, t return l.conn.readMessageWithContextTimeout(ctx, timeout) } +func (l *openAIWSConnLease) PingWithTimeout(timeout time.Duration) error { + if l == nil || l.conn == nil { + return errOpenAIWSConnClosed + } + return l.conn.pingWithTimeout(timeout) +} + func (l *openAIWSConnLease) MarkBroken() { if l == nil || l.pool == nil || l.conn == nil { return diff --git a/backend/internal/service/openai_ws_pool_test.go b/backend/internal/service/openai_ws_pool_test.go index 5cac87706..b612cc0f0 100644 --- a/backend/internal/service/openai_ws_pool_test.go +++ b/backend/internal/service/openai_ws_pool_test.go @@ -400,6 +400,16 @@ func TestOpenAIWSConnLease_WriteJSONWithContextTimeout_RespectsParentContext(t * require.Less(t, elapsed, 200*time.Millisecond) } +func TestOpenAIWSConnLease_PingWithTimeout(t *testing.T) { + conn := newOpenAIWSConn("ping_ok", 1, &openAIWSFakeConn{}, nil) + lease := &openAIWSConnLease{conn: conn} + require.NoError(t, lease.PingWithTimeout(50*time.Millisecond)) + + var nilLease *openAIWSConnLease + err := nilLease.PingWithTimeout(50 * time.Millisecond) + require.ErrorIs(t, err, errOpenAIWSConnClosed) +} + func TestOpenAIWSConn_ReadAndWriteCanProceedConcurrently(t *testing.T) { conn := newOpenAIWSConn("full_duplex", 1, &openAIWSBlockingConn{readDelay: 120 * 
time.Millisecond}, nil) From cf3ccc26e2f50822f60606c20cf00422bbc10ff8 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 17:38:06 +0800 Subject: [PATCH 023/120] =?UTF-8?q?perf(openai-ws):=20=E4=BC=98=E5=8C=96?= =?UTF-8?q?=20ws=20=E7=83=AD=E8=B7=AF=E5=BE=84=E8=A7=A3=E6=9E=90=E4=B8=8E?= =?UTF-8?q?=E6=B5=81=E5=BC=8F=E5=86=99=E5=85=A5=E5=BC=80=E9=94=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../internal/service/openai_ws_forwarder.go | 270 ++++++++++++------ .../openai_ws_forwarder_benchmark_test.go | 50 ++++ ..._ws_forwarder_hotpath_optimization_test.go | 73 +++++ 3 files changed, 309 insertions(+), 84 deletions(-) create mode 100644 backend/internal/service/openai_ws_forwarder_hotpath_optimization_test.go diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index 1d779f7f0..fc83283a6 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -304,7 +304,7 @@ func shouldLogOpenAIWSBufferedEvent(idx int) bool { } func openAIWSEventMayContainModel(eventType string) bool { - switch strings.TrimSpace(eventType) { + switch eventType { case "response.created", "response.in_progress", "response.completed", @@ -315,7 +315,23 @@ func openAIWSEventMayContainModel(eventType string) bool { "response.canceled": return true default: - return false + trimmed := strings.TrimSpace(eventType) + if trimmed == eventType { + return false + } + switch trimmed { + case "response.created", + "response.in_progress", + "response.completed", + "response.done", + "response.failed", + "response.incomplete", + "response.cancelled", + "response.canceled": + return true + default: + return false + } } } @@ -336,17 +352,67 @@ func openAIWSEventMayContainToolCalls(eventType string) bool { } func openAIWSEventShouldParseUsage(eventType string) bool { - return strings.TrimSpace(eventType) == "response.completed" + 
return eventType == "response.completed" || strings.TrimSpace(eventType) == "response.completed" +} + +func parseOpenAIWSEventEnvelope(message []byte) (eventType string, responseID string, response gjson.Result) { + if len(message) == 0 { + return "", "", gjson.Result{} + } + values := gjson.GetManyBytes(message, "type", "response.id", "id", "response") + eventType = strings.TrimSpace(values[0].String()) + if id := strings.TrimSpace(values[1].String()); id != "" { + responseID = id + } else { + responseID = strings.TrimSpace(values[2].String()) + } + return eventType, responseID, values[3] +} + +func openAIWSMessageLikelyContainsToolCalls(message []byte) bool { + if len(message) == 0 { + return false + } + return bytes.Contains(message, []byte(`"tool_calls"`)) || + bytes.Contains(message, []byte(`"tool_call"`)) || + bytes.Contains(message, []byte(`"function_call"`)) +} + +func parseOpenAIWSResponseUsageFromCompletedEvent(message []byte, usage *OpenAIUsage) { + if usage == nil || len(message) == 0 { + return + } + values := gjson.GetManyBytes( + message, + "response.usage.input_tokens", + "response.usage.output_tokens", + "response.usage.input_tokens_details.cached_tokens", + ) + usage.InputTokens = int(values[0].Int()) + usage.OutputTokens = int(values[1].Int()) + usage.CacheReadInputTokens = int(values[2].Int()) +} + +func parseOpenAIWSErrorEventFields(message []byte) (code string, errType string, errMessage string) { + if len(message) == 0 { + return "", "", "" + } + values := gjson.GetManyBytes(message, "error.code", "error.type", "error.message") + return strings.TrimSpace(values[0].String()), strings.TrimSpace(values[1].String()), strings.TrimSpace(values[2].String()) +} + +func summarizeOpenAIWSErrorEventFieldsFromRaw(codeRaw, errTypeRaw, errMessageRaw string) (code string, errType string, errMessage string) { + code = truncateOpenAIWSLogValue(codeRaw, openAIWSLogValueMaxLen) + errType = truncateOpenAIWSLogValue(errTypeRaw, openAIWSLogValueMaxLen) + 
errMessage = truncateOpenAIWSLogValue(errMessageRaw, openAIWSLogValueMaxLen) + return code, errType, errMessage } func summarizeOpenAIWSErrorEventFields(message []byte) (code string, errType string, errMessage string) { if len(message) == 0 { return "-", "-", "-" } - code = truncateOpenAIWSLogValue(gjson.GetBytes(message, "error.code").String(), openAIWSLogValueMaxLen) - errType = truncateOpenAIWSLogValue(gjson.GetBytes(message, "error.type").String(), openAIWSLogValueMaxLen) - errMessage = truncateOpenAIWSLogValue(gjson.GetBytes(message, "error.message").String(), openAIWSLogValueMaxLen) - return code, errType, errMessage + return summarizeOpenAIWSErrorEventFieldsFromRaw(parseOpenAIWSErrorEventFields(message)) } func summarizeOpenAIWSPayloadKeySizes(payload map[string]any, topN int) string { @@ -1435,6 +1501,10 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( var finalResponse []byte wroteDownstream := false needModelReplace := originalModel != mappedModel + var mappedModelBytes []byte + if needModelReplace && mappedModel != "" { + mappedModelBytes = []byte(mappedModel) + } bufferedStreamEvents := make([][]byte, 0, 4) eventCount := 0 tokenEventCount := 0 @@ -1483,12 +1553,11 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( if clientDisconnected { return } - wErr := error(nil) - if _, wErr = io.WriteString(c.Writer, "data: "); wErr == nil { - if _, wErr = c.Writer.Write(message); wErr == nil { - _, wErr = io.WriteString(c.Writer, "\n\n") - } - } + frame := make([]byte, 0, len(message)+8) + frame = append(frame, "data: "...) + frame = append(frame, message...) 
+ frame = append(frame, '\n', '\n') + _, wErr := c.Writer.Write(frame) if wErr == nil { wroteDownstream = true pendingFlushEvents++ @@ -1555,7 +1624,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( return nil, fmt.Errorf("openai ws read event: %w", readErr) } - eventType := strings.TrimSpace(gjson.GetBytes(message, "type").String()) + eventType, eventResponseID, responseField := parseOpenAIWSEventEnvelope(message) if eventType == "" { continue } @@ -1565,8 +1634,8 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( } lastEventType = eventType - if responseID == "" { - responseID = extractOpenAIWSResponseID(message) + if responseID == "" && eventResponseID != "" { + responseID = eventResponseID } isTokenEvent := isOpenAIWSTokenEvent(eventType) @@ -1596,26 +1665,27 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( } if !clientDisconnected { - if needModelReplace && openAIWSEventMayContainModel(eventType) { + if needModelReplace && len(mappedModelBytes) > 0 && openAIWSEventMayContainModel(eventType) && bytes.Contains(message, mappedModelBytes) { message = replaceOpenAIWSMessageModel(message, mappedModel, originalModel) } - if openAIWSEventMayContainToolCalls(eventType) { + if openAIWSEventMayContainToolCalls(eventType) && openAIWSMessageLikelyContainsToolCalls(message) { if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(message); changed { message = corrected } } } if openAIWSEventShouldParseUsage(eventType) { - s.parseSSEUsageBytes(message, usage) + parseOpenAIWSResponseUsageFromCompletedEvent(message, usage) } if eventType == "error" { - errMsg := strings.TrimSpace(gjson.GetBytes(message, "error.message").String()) + errCodeRaw, errTypeRaw, errMsgRaw := parseOpenAIWSErrorEventFields(message) + errMsg := strings.TrimSpace(errMsgRaw) if errMsg == "" { errMsg = "Upstream websocket error" } - fallbackReason, canFallback := classifyOpenAIWSErrorEvent(message) - errCode, errType, errMessage := summarizeOpenAIWSErrorEventFields(message) + 
fallbackReason, canFallback := classifyOpenAIWSErrorEventFromRaw(errCodeRaw, errTypeRaw, errMsgRaw) + errCode, errType, errMessage := summarizeOpenAIWSErrorEventFieldsFromRaw(errCodeRaw, errTypeRaw, errMsgRaw) logOpenAIWSModeInfo( "error_event account_id=%d conn_id=%s idx=%d fallback_reason=%s can_fallback=%v err_code=%s err_type=%s err_message=%s", account.ID, @@ -1632,7 +1702,7 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( if !wroteDownstream && canFallback { return nil, wrapOpenAIWSFallback(fallbackReason, errors.New(errMsg)) } - statusCode := openAIWSErrorHTTPStatus(message) + statusCode := openAIWSErrorHTTPStatusFromRaw(errCodeRaw, errTypeRaw) setOpsUpstreamError(c, statusCode, errMsg, "") if reqStream && !clientDisconnected { flushBufferedStreamEvents("error_event") @@ -1674,19 +1744,12 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( emitStreamMessage(message, isTerminalEvent) } } else { - if response := gjson.GetBytes(message, "response"); response.Exists() && response.Type == gjson.JSON { - finalResponse = []byte(response.Raw) + if responseField.Exists() && responseField.Type == gjson.JSON { + finalResponse = []byte(responseField.Raw) } } if isTerminalEvent { - if !reqStream { - if len(finalResponse) == 0 { - if resp := gjson.GetBytes(message, "response"); resp.Exists() && resp.Type == gjson.JSON { - finalResponse = []byte(resp.Raw) - } - } - } break } } @@ -2055,13 +2118,15 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( false, ) } - logOpenAIWSModeInfo( - "ingress_ws_turn_request_sent account_id=%d turn=%d conn_id=%s payload_bytes=%d", - account.ID, - turn, - truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), - payloadBytes, - ) + if debugEnabled { + logOpenAIWSModeDebug( + "ingress_ws_turn_request_sent account_id=%d turn=%d conn_id=%s payload_bytes=%d", + account.ID, + turn, + truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), + payloadBytes, + ) + } responseID := "" usage := OpenAIUsage{} @@ 
-2075,12 +2140,16 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( needModelReplace := false clientDisconnected := false mappedModel := "" + var mappedModelBytes []byte if originalModel != "" { mappedModel = account.GetMappedModel(originalModel) if normalizedModel := normalizeCodexModel(mappedModel); normalizedModel != "" { mappedModel = normalizedModel } needModelReplace = mappedModel != "" && mappedModel != originalModel + if needModelReplace { + mappedModelBytes = []byte(mappedModel) + } } if streamValue, ok := payload["stream"].(bool); ok { reqStream = streamValue @@ -2096,10 +2165,10 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( ) } - if responseID == "" { - responseID = strings.TrimSpace(extractOpenAIWSResponseID(upstreamMessage)) + eventType, eventResponseID, _ := parseOpenAIWSEventEnvelope(upstreamMessage) + if responseID == "" && eventResponseID != "" { + responseID = eventResponseID } - eventType := strings.TrimSpace(gjson.GetBytes(upstreamMessage, "type").String()) if eventType != "" { eventCount++ if firstEventType == "" { @@ -2120,14 +2189,14 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( firstTokenMs = &ms } if openAIWSEventShouldParseUsage(eventType) { - s.parseSSEUsageBytes(upstreamMessage, &usage) + parseOpenAIWSResponseUsageFromCompletedEvent(upstreamMessage, &usage) } if !clientDisconnected { - if needModelReplace && openAIWSEventMayContainModel(eventType) { + if needModelReplace && len(mappedModelBytes) > 0 && openAIWSEventMayContainModel(eventType) && bytes.Contains(upstreamMessage, mappedModelBytes) { upstreamMessage = replaceOpenAIWSMessageModel(upstreamMessage, mappedModel, originalModel) } - if openAIWSEventMayContainToolCalls(eventType) { + if openAIWSEventMayContainToolCalls(eventType) && openAIWSMessageLikelyContainsToolCalls(upstreamMessage) { if corrected, changed := s.toolCorrector.CorrectToolCallsInSSEBytes(upstreamMessage); changed { upstreamMessage = corrected } @@ 
-2164,21 +2233,23 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( if firstTokenMs != nil { firstTokenMsValue = *firstTokenMs } - logOpenAIWSModeInfo( - "ingress_ws_turn_completed account_id=%d turn=%d conn_id=%s response_id=%s duration_ms=%d events=%d token_events=%d terminal_events=%d first_event=%s last_event=%s first_token_ms=%d client_disconnected=%v", - account.ID, - turn, - truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), - truncateOpenAIWSLogValue(responseID, openAIWSIDValueMaxLen), - time.Since(turnStart).Milliseconds(), - eventCount, - tokenEventCount, - terminalEventCount, - truncateOpenAIWSLogValue(firstEventType, openAIWSLogValueMaxLen), - truncateOpenAIWSLogValue(lastEventType, openAIWSLogValueMaxLen), - firstTokenMsValue, - clientDisconnected, - ) + if debugEnabled { + logOpenAIWSModeDebug( + "ingress_ws_turn_completed account_id=%d turn=%d conn_id=%s response_id=%s duration_ms=%d events=%d token_events=%d terminal_events=%d first_event=%s last_event=%s first_token_ms=%d client_disconnected=%v", + account.ID, + turn, + truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(responseID, openAIWSIDValueMaxLen), + time.Since(turnStart).Milliseconds(), + eventCount, + tokenEventCount, + terminalEventCount, + truncateOpenAIWSLogValue(firstEventType, openAIWSLogValueMaxLen), + truncateOpenAIWSLogValue(lastEventType, openAIWSLogValueMaxLen), + firstTokenMsValue, + clientDisconnected, + ) + } return &OpenAIForwardResult{ RequestID: responseID, Usage: usage, @@ -2461,13 +2532,13 @@ func (s *OpenAIGatewayService) performOpenAIWSGeneratePrewarm( return wrapOpenAIWSFallback("prewarm_"+classifyOpenAIWSReadFallbackReason(readErr), readErr) } - eventType := strings.TrimSpace(gjson.GetBytes(message, "type").String()) + eventType, eventResponseID, _ := parseOpenAIWSEventEnvelope(message) if eventType == "" { continue } prewarmEventCount++ - if prewarmResponseID == "" { - prewarmResponseID = 
extractOpenAIWSResponseID(message) + if prewarmResponseID == "" && eventResponseID != "" { + prewarmResponseID = eventResponseID } if prewarmEventCount <= openAIWSPrewarmEventLogHead || eventType == "error" || isOpenAIWSTerminalEvent(eventType) { logOpenAIWSModeInfo( @@ -2481,12 +2552,13 @@ func (s *OpenAIGatewayService) performOpenAIWSGeneratePrewarm( } if eventType == "error" { - errMsg := strings.TrimSpace(gjson.GetBytes(message, "error.message").String()) + errCodeRaw, errTypeRaw, errMsgRaw := parseOpenAIWSErrorEventFields(message) + errMsg := strings.TrimSpace(errMsgRaw) if errMsg == "" { errMsg = "OpenAI websocket prewarm error" } - fallbackReason, canFallback := classifyOpenAIWSErrorEvent(message) - errCode, errType, errMessage := summarizeOpenAIWSErrorEventFields(message) + fallbackReason, canFallback := classifyOpenAIWSErrorEventFromRaw(errCodeRaw, errTypeRaw, errMsgRaw) + errCode, errType, errMessage := summarizeOpenAIWSErrorEventFieldsFromRaw(errCodeRaw, errTypeRaw, errMsgRaw) logOpenAIWSModeInfo( "prewarm_error_event account_id=%d conn_id=%s idx=%d fallback_reason=%s can_fallback=%v err_code=%s err_type=%s err_message=%s", account.ID, @@ -2545,10 +2617,14 @@ func payloadAsJSONBytes(payload map[string]any) []byte { } func extractOpenAIWSResponseID(message []byte) string { - if id := strings.TrimSpace(gjson.GetBytes(message, "response.id").String()); id != "" { + if len(message) == 0 { + return "" + } + values := gjson.GetManyBytes(message, "response.id", "id") + if id := strings.TrimSpace(values[0].String()); id != "" { return id } - if id := strings.TrimSpace(gjson.GetBytes(message, "id").String()); id != "" { + if id := strings.TrimSpace(values[1].String()); id != "" { return id } return "" @@ -2588,13 +2664,25 @@ func replaceOpenAIWSMessageModel(message []byte, fromModel, toModel string) []by if len(message) == 0 { return message } + if strings.TrimSpace(fromModel) == "" || strings.TrimSpace(toModel) == "" || fromModel == toModel { + return message + 
} + if !bytes.Contains(message, []byte(`"model"`)) || !bytes.Contains(message, []byte(fromModel)) { + return message + } + modelValues := gjson.GetManyBytes(message, "model", "response.model") + replaceModel := modelValues[0].Exists() && modelValues[0].Str == fromModel + replaceResponseModel := modelValues[1].Exists() && modelValues[1].Str == fromModel + if !replaceModel && !replaceResponseModel { + return message + } updated := message - if m := gjson.GetBytes(updated, "model"); m.Exists() && m.Str == fromModel { + if replaceModel { if next, err := sjson.SetBytes(updated, "model", toModel); err == nil { updated = next } } - if m := gjson.GetBytes(updated, "response.model"); m.Exists() && m.Str == fromModel { + if replaceResponseModel { if next, err := sjson.SetBytes(updated, "response.model", toModel); err == nil { updated = next } @@ -2606,9 +2694,15 @@ func populateOpenAIUsageFromResponseJSON(body []byte, usage *OpenAIUsage) { if usage == nil || len(body) == 0 { return } - usage.InputTokens = int(gjson.GetBytes(body, "usage.input_tokens").Int()) - usage.OutputTokens = int(gjson.GetBytes(body, "usage.output_tokens").Int()) - usage.CacheReadInputTokens = int(gjson.GetBytes(body, "usage.input_tokens_details.cached_tokens").Int()) + values := gjson.GetManyBytes( + body, + "usage.input_tokens", + "usage.output_tokens", + "usage.input_tokens_details.cached_tokens", + ) + usage.InputTokens = int(values[0].Int()) + usage.OutputTokens = int(values[1].Int()) + usage.CacheReadInputTokens = int(values[2].Int()) } func getOpenAIGroupIDFromContext(c *gin.Context) int64 { @@ -2736,13 +2830,10 @@ func classifyOpenAIWSAcquireError(err error) string { return "acquire_conn" } -func classifyOpenAIWSErrorEvent(message []byte) (string, bool) { - if len(message) == 0 { - return "event_error", false - } - code := strings.ToLower(strings.TrimSpace(gjson.GetBytes(message, "error.code").String())) - errType := strings.ToLower(strings.TrimSpace(gjson.GetBytes(message, 
"error.type").String())) - msg := strings.ToLower(strings.TrimSpace(gjson.GetBytes(message, "error.message").String())) +func classifyOpenAIWSErrorEventFromRaw(codeRaw, errTypeRaw, msgRaw string) (string, bool) { + code := strings.ToLower(strings.TrimSpace(codeRaw)) + errType := strings.ToLower(strings.TrimSpace(errTypeRaw)) + msg := strings.ToLower(strings.TrimSpace(msgRaw)) switch code { case "upgrade_required": @@ -2776,13 +2867,16 @@ func classifyOpenAIWSErrorEvent(message []byte) (string, bool) { return "event_error", false } -func openAIWSErrorHTTPStatus(message []byte) int { +func classifyOpenAIWSErrorEvent(message []byte) (string, bool) { if len(message) == 0 { - return http.StatusBadGateway + return "event_error", false } - code := strings.ToLower(strings.TrimSpace(gjson.GetBytes(message, "error.code").String())) - errType := strings.ToLower(strings.TrimSpace(gjson.GetBytes(message, "error.type").String())) + return classifyOpenAIWSErrorEventFromRaw(parseOpenAIWSErrorEventFields(message)) +} +func openAIWSErrorHTTPStatusFromRaw(codeRaw, errTypeRaw string) int { + code := strings.ToLower(strings.TrimSpace(codeRaw)) + errType := strings.ToLower(strings.TrimSpace(errTypeRaw)) switch { case strings.Contains(errType, "invalid_request"), strings.Contains(code, "invalid_request"), @@ -2805,6 +2899,14 @@ func openAIWSErrorHTTPStatus(message []byte) int { } } +func openAIWSErrorHTTPStatus(message []byte) int { + if len(message) == 0 { + return http.StatusBadGateway + } + codeRaw, errTypeRaw, _ := parseOpenAIWSErrorEventFields(message) + return openAIWSErrorHTTPStatusFromRaw(codeRaw, errTypeRaw) +} + func (s *OpenAIGatewayService) openAIWSFallbackCooldown() time.Duration { if s == nil || s.cfg == nil { return 30 * time.Second diff --git a/backend/internal/service/openai_ws_forwarder_benchmark_test.go b/backend/internal/service/openai_ws_forwarder_benchmark_test.go index dd2d0ae97..bd03ab5a6 100644 --- a/backend/internal/service/openai_ws_forwarder_benchmark_test.go 
+++ b/backend/internal/service/openai_ws_forwarder_benchmark_test.go @@ -11,6 +11,7 @@ var ( benchmarkOpenAIWSPayloadJSONSink string benchmarkOpenAIWSStringSink string benchmarkOpenAIWSBoolSink bool + benchmarkOpenAIWSBytesSink []byte ) func BenchmarkOpenAIWSForwarderHotPath(b *testing.B) { @@ -75,3 +76,52 @@ func benchmarkOpenAIWSHotPathRequest() map[string]any { "store": false, } } + +func BenchmarkOpenAIWSEventEnvelopeParse(b *testing.B) { + event := []byte(`{"type":"response.completed","response":{"id":"resp_bench_1","model":"gpt-5.1","usage":{"input_tokens":12,"output_tokens":8}}}`) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + eventType, responseID, response := parseOpenAIWSEventEnvelope(event) + benchmarkOpenAIWSStringSink = eventType + benchmarkOpenAIWSStringSink = responseID + benchmarkOpenAIWSBoolSink = response.Exists() + } +} + +func BenchmarkOpenAIWSErrorEventFieldReuse(b *testing.B) { + event := []byte(`{"type":"error","error":{"type":"invalid_request_error","code":"invalid_request","message":"invalid input"}}`) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + codeRaw, errTypeRaw, errMsgRaw := parseOpenAIWSErrorEventFields(event) + benchmarkOpenAIWSStringSink, benchmarkOpenAIWSBoolSink = classifyOpenAIWSErrorEventFromRaw(codeRaw, errTypeRaw, errMsgRaw) + code, errType, errMsg := summarizeOpenAIWSErrorEventFieldsFromRaw(codeRaw, errTypeRaw, errMsgRaw) + benchmarkOpenAIWSStringSink = code + benchmarkOpenAIWSStringSink = errType + benchmarkOpenAIWSStringSink = errMsg + benchmarkOpenAIWSBoolSink = openAIWSErrorHTTPStatusFromRaw(codeRaw, errTypeRaw) > 0 + } +} + +func BenchmarkReplaceOpenAIWSMessageModel_NoMatchFastPath(b *testing.B) { + event := []byte(`{"type":"response.output_text.delta","delta":"hello world"}`) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + benchmarkOpenAIWSBytesSink = replaceOpenAIWSMessageModel(event, "gpt-5.1", "custom-model") + } +} + +func 
BenchmarkReplaceOpenAIWSMessageModel_DualReplace(b *testing.B) { + event := []byte(`{"type":"response.completed","model":"gpt-5.1","response":{"id":"resp_1","model":"gpt-5.1"}}`) + b.ReportAllocs() + b.ResetTimer() + + for i := 0; i < b.N; i++ { + benchmarkOpenAIWSBytesSink = replaceOpenAIWSMessageModel(event, "gpt-5.1", "custom-model") + } +} diff --git a/backend/internal/service/openai_ws_forwarder_hotpath_optimization_test.go b/backend/internal/service/openai_ws_forwarder_hotpath_optimization_test.go new file mode 100644 index 000000000..761676038 --- /dev/null +++ b/backend/internal/service/openai_ws_forwarder_hotpath_optimization_test.go @@ -0,0 +1,73 @@ +package service + +import ( + "net/http" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestParseOpenAIWSEventEnvelope(t *testing.T) { + eventType, responseID, response := parseOpenAIWSEventEnvelope([]byte(`{"type":"response.completed","response":{"id":"resp_1","model":"gpt-5.1"}}`)) + require.Equal(t, "response.completed", eventType) + require.Equal(t, "resp_1", responseID) + require.True(t, response.Exists()) + require.Equal(t, `{"id":"resp_1","model":"gpt-5.1"}`, response.Raw) + + eventType, responseID, response = parseOpenAIWSEventEnvelope([]byte(`{"type":"response.delta","id":"evt_1"}`)) + require.Equal(t, "response.delta", eventType) + require.Equal(t, "evt_1", responseID) + require.False(t, response.Exists()) +} + +func TestParseOpenAIWSResponseUsageFromCompletedEvent(t *testing.T) { + usage := &OpenAIUsage{} + parseOpenAIWSResponseUsageFromCompletedEvent( + []byte(`{"type":"response.completed","response":{"usage":{"input_tokens":11,"output_tokens":7,"input_tokens_details":{"cached_tokens":3}}}}`), + usage, + ) + require.Equal(t, 11, usage.InputTokens) + require.Equal(t, 7, usage.OutputTokens) + require.Equal(t, 3, usage.CacheReadInputTokens) +} + +func TestOpenAIWSErrorEventHelpers_ConsistentWithWrapper(t *testing.T) { + message := 
[]byte(`{"type":"error","error":{"type":"invalid_request_error","code":"invalid_request","message":"invalid input"}}`) + codeRaw, errTypeRaw, errMsgRaw := parseOpenAIWSErrorEventFields(message) + + wrappedReason, wrappedRecoverable := classifyOpenAIWSErrorEvent(message) + rawReason, rawRecoverable := classifyOpenAIWSErrorEventFromRaw(codeRaw, errTypeRaw, errMsgRaw) + require.Equal(t, wrappedReason, rawReason) + require.Equal(t, wrappedRecoverable, rawRecoverable) + + wrappedStatus := openAIWSErrorHTTPStatus(message) + rawStatus := openAIWSErrorHTTPStatusFromRaw(codeRaw, errTypeRaw) + require.Equal(t, wrappedStatus, rawStatus) + require.Equal(t, http.StatusBadRequest, rawStatus) + + wrappedCode, wrappedType, wrappedMsg := summarizeOpenAIWSErrorEventFields(message) + rawCode, rawType, rawMsg := summarizeOpenAIWSErrorEventFieldsFromRaw(codeRaw, errTypeRaw, errMsgRaw) + require.Equal(t, wrappedCode, rawCode) + require.Equal(t, wrappedType, rawType) + require.Equal(t, wrappedMsg, rawMsg) +} + +func TestOpenAIWSMessageLikelyContainsToolCalls(t *testing.T) { + require.False(t, openAIWSMessageLikelyContainsToolCalls([]byte(`{"type":"response.output_text.delta","delta":"hello"}`))) + require.True(t, openAIWSMessageLikelyContainsToolCalls([]byte(`{"type":"response.output_item.added","item":{"tool_calls":[{"id":"tc1"}]}}`))) + require.True(t, openAIWSMessageLikelyContainsToolCalls([]byte(`{"type":"response.output_item.added","item":{"type":"function_call"}}`))) +} + +func TestReplaceOpenAIWSMessageModel_OptimizedStillCorrect(t *testing.T) { + noModel := []byte(`{"type":"response.output_text.delta","delta":"hello"}`) + require.Equal(t, string(noModel), string(replaceOpenAIWSMessageModel(noModel, "gpt-5.1", "custom-model"))) + + rootOnly := []byte(`{"type":"response.created","model":"gpt-5.1"}`) + require.Equal(t, `{"type":"response.created","model":"custom-model"}`, string(replaceOpenAIWSMessageModel(rootOnly, "gpt-5.1", "custom-model"))) + + responseOnly := 
[]byte(`{"type":"response.completed","response":{"model":"gpt-5.1"}}`) + require.Equal(t, `{"type":"response.completed","response":{"model":"custom-model"}}`, string(replaceOpenAIWSMessageModel(responseOnly, "gpt-5.1", "custom-model"))) + + both := []byte(`{"model":"gpt-5.1","response":{"model":"gpt-5.1"}}`) + require.Equal(t, `{"model":"custom-model","response":{"model":"custom-model"}}`, string(replaceOpenAIWSMessageModel(both, "gpt-5.1", "custom-model"))) +} From 59718cfc4d305d76f515e41fb40a5381cab9b013 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 19:16:06 +0800 Subject: [PATCH 024/120] =?UTF-8?q?perf(openai):=20=E4=BC=98=E5=8C=96?= =?UTF-8?q?=E7=BD=91=E5=85=B3=E7=83=AD=E8=B7=AF=E5=BE=84=E5=B9=B6=E8=A1=A5?= =?UTF-8?q?=E5=85=85=E8=B0=83=E5=BA=A6=E5=9F=BA=E5=87=86?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- .../handler/openai_gateway_handler.go | 33 ++- .../handler/openai_gateway_handler_test.go | 21 ++ .../service/openai_account_scheduler.go | 275 +++++++++++------ ...openai_account_scheduler_benchmark_test.go | 83 ++++++ .../service/openai_account_scheduler_test.go | 87 ++++++ .../service/openai_gateway_service.go | 278 +++++++++++------- backend/internal/service/openai_ws_pool.go | 10 +- .../internal/service/openai_ws_pool_test.go | 13 + 8 files changed, 606 insertions(+), 194 deletions(-) create mode 100644 backend/internal/service/openai_account_scheduler_benchmark_test.go diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 1d16d9ffb..9e56a3fe0 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -1,6 +1,7 @@ package handler import ( + "bytes" "context" "encoding/json" "errors" @@ -35,6 +36,34 @@ type OpenAIGatewayHandler struct { maxAccountSwitches int } +const ( + openAIRequestBodyReadInitCap = 512 + openAIRequestBodyReadMaxInitCap = 1 << 20 +) + 
+func readRequestBodyWithPrealloc(req *http.Request) ([]byte, error) { + if req == nil || req.Body == nil { + return nil, nil + } + capHint := openAIRequestBodyReadInitCap + if req.ContentLength > 0 { + switch { + case req.ContentLength < int64(openAIRequestBodyReadInitCap): + capHint = openAIRequestBodyReadInitCap + case req.ContentLength > int64(openAIRequestBodyReadMaxInitCap): + capHint = openAIRequestBodyReadMaxInitCap + default: + capHint = int(req.ContentLength) + } + } + + buf := bytes.NewBuffer(make([]byte, 0, capHint)) + if _, err := io.Copy(buf, req.Body); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + // NewOpenAIGatewayHandler creates a new OpenAIGatewayHandler func NewOpenAIGatewayHandler( gatewayService *service.OpenAIGatewayService, @@ -97,7 +126,7 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { } // Read request body - body, err := io.ReadAll(c.Request.Body) + body, err := readRequestBodyWithPrealloc(c.Request) if err != nil { if maxErr, ok := extractMaxBytesError(err); ok { h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit)) @@ -112,8 +141,6 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { return } - setOpsRequestContext(c, "", false, body) - // 校验请求体 JSON 合法性 if !gjson.ValidBytes(body) { h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body") diff --git a/backend/internal/handler/openai_gateway_handler_test.go b/backend/internal/handler/openai_gateway_handler_test.go index a80867c4a..71ce29875 100644 --- a/backend/internal/handler/openai_gateway_handler_test.go +++ b/backend/internal/handler/openai_gateway_handler_test.go @@ -107,6 +107,27 @@ func TestOpenAIHandleStreamingAwareError_NonStreaming(t *testing.T) { assert.Equal(t, "test error", errorObj["message"]) } +func TestReadRequestBodyWithPrealloc(t *testing.T) { + payload := `{"model":"gpt-5","input":"hello"}` + req := 
httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(payload)) + req.ContentLength = int64(len(payload)) + + body, err := readRequestBodyWithPrealloc(req) + require.NoError(t, err) + require.Equal(t, payload, string(body)) +} + +func TestReadRequestBodyWithPrealloc_MaxBytesError(t *testing.T) { + rec := httptest.NewRecorder() + req := httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(strings.Repeat("x", 8))) + req.Body = http.MaxBytesReader(rec, req.Body, 4) + + _, err := readRequestBodyWithPrealloc(req) + require.Error(t, err) + var maxErr *http.MaxBytesError + require.ErrorAs(t, err, &maxErr) +} + func TestOpenAIEnsureForwardErrorResponse_WritesFallbackWhenNotWritten(t *testing.T) { gin.SetMode(gin.TestMode) w := httptest.NewRecorder() diff --git a/backend/internal/service/openai_account_scheduler.go b/backend/internal/service/openai_account_scheduler.go index c8cf6888d..bb7daf099 100644 --- a/backend/internal/service/openai_account_scheduler.go +++ b/backend/internal/service/openai_account_scheduler.go @@ -1,6 +1,7 @@ package service import ( + "container/heap" "context" "errors" "math" @@ -94,19 +95,49 @@ func (m *openAIAccountSchedulerMetrics) recordSwitch() { } type openAIAccountRuntimeStats struct { - mu sync.RWMutex - accounts map[int64]*openAIAccountRuntimeStat + accounts sync.Map + accountCount atomic.Int64 } type openAIAccountRuntimeStat struct { - errorRateEWMA float64 - ttftEWMA float64 - hasTTFT bool + errorRateEWMABits atomic.Uint64 + ttftEWMABits atomic.Uint64 } func newOpenAIAccountRuntimeStats() *openAIAccountRuntimeStats { - return &openAIAccountRuntimeStats{ - accounts: make(map[int64]*openAIAccountRuntimeStat, 64), + return &openAIAccountRuntimeStats{} +} + +func (s *openAIAccountRuntimeStats) loadOrCreate(accountID int64) *openAIAccountRuntimeStat { + if value, ok := s.accounts.Load(accountID); ok { + stat, _ := value.(*openAIAccountRuntimeStat) + if stat != nil { + return stat + } + } + + stat := 
&openAIAccountRuntimeStat{} + stat.ttftEWMABits.Store(math.Float64bits(math.NaN())) + actual, loaded := s.accounts.LoadOrStore(accountID, stat) + if !loaded { + s.accountCount.Add(1) + return stat + } + existing, _ := actual.(*openAIAccountRuntimeStat) + if existing != nil { + return existing + } + return stat +} + +func updateEWMAAtomic(target *atomic.Uint64, sample float64, alpha float64) { + for { + oldBits := target.Load() + oldValue := math.Float64frombits(oldBits) + newValue := alpha*sample + (1-alpha)*oldValue + if target.CompareAndSwap(oldBits, math.Float64bits(newValue)) { + return + } } } @@ -115,28 +146,30 @@ func (s *openAIAccountRuntimeStats) report(accountID int64, success bool, firstT return } const alpha = 0.2 - s.mu.Lock() - defer s.mu.Unlock() - - stat, ok := s.accounts[accountID] - if !ok { - stat = &openAIAccountRuntimeStat{} - s.accounts[accountID] = stat - } + stat := s.loadOrCreate(accountID) errorSample := 1.0 if success { errorSample = 0.0 } - stat.errorRateEWMA = alpha*errorSample + (1-alpha)*stat.errorRateEWMA + updateEWMAAtomic(&stat.errorRateEWMABits, errorSample, alpha) if firstTokenMs != nil && *firstTokenMs > 0 { ttft := float64(*firstTokenMs) - if !stat.hasTTFT { - stat.ttftEWMA = ttft - stat.hasTTFT = true - } else { - stat.ttftEWMA = alpha*ttft + (1-alpha)*stat.ttftEWMA + ttftBits := math.Float64bits(ttft) + for { + oldBits := stat.ttftEWMABits.Load() + oldValue := math.Float64frombits(oldBits) + if math.IsNaN(oldValue) { + if stat.ttftEWMABits.CompareAndSwap(oldBits, ttftBits) { + break + } + continue + } + newValue := alpha*ttft + (1-alpha)*oldValue + if stat.ttftEWMABits.CompareAndSwap(oldBits, math.Float64bits(newValue)) { + break + } } } } @@ -145,22 +178,27 @@ func (s *openAIAccountRuntimeStats) snapshot(accountID int64) (errorRate float64 if s == nil || accountID <= 0 { return 0, 0, false } - s.mu.RLock() - defer s.mu.RUnlock() - stat, ok := s.accounts[accountID] - if !ok || stat == nil { + value, ok := 
s.accounts.Load(accountID) + if !ok { + return 0, 0, false + } + stat, _ := value.(*openAIAccountRuntimeStat) + if stat == nil { return 0, 0, false } - return clamp01(stat.errorRateEWMA), stat.ttftEWMA, stat.hasTTFT + errorRate = clamp01(math.Float64frombits(stat.errorRateEWMABits.Load())) + ttftValue := math.Float64frombits(stat.ttftEWMABits.Load()) + if math.IsNaN(ttftValue) { + return errorRate, 0, false + } + return errorRate, ttftValue, true } func (s *openAIAccountRuntimeStats) size() int { if s == nil { return 0 } - s.mu.RLock() - defer s.mu.RUnlock() - return len(s.accounts) + return int(s.accountCount.Load()) } type defaultOpenAIAccountScheduler struct { @@ -316,6 +354,84 @@ type openAIAccountCandidateScore struct { hasTTFT bool } +type openAIAccountCandidateHeap []openAIAccountCandidateScore + +func (h openAIAccountCandidateHeap) Len() int { + return len(h) +} + +func (h openAIAccountCandidateHeap) Less(i, j int) bool { + // 最小堆根节点保存“最差”候选,便于 O(log k) 维护 topK。 + return isOpenAIAccountCandidateBetter(h[j], h[i]) +} + +func (h openAIAccountCandidateHeap) Swap(i, j int) { + h[i], h[j] = h[j], h[i] +} + +func (h *openAIAccountCandidateHeap) Push(x any) { + *h = append(*h, x.(openAIAccountCandidateScore)) +} + +func (h *openAIAccountCandidateHeap) Pop() any { + old := *h + n := len(old) + last := old[n-1] + *h = old[:n-1] + return last +} + +func isOpenAIAccountCandidateBetter(left openAIAccountCandidateScore, right openAIAccountCandidateScore) bool { + if left.score != right.score { + return left.score > right.score + } + if left.account.Priority != right.account.Priority { + return left.account.Priority < right.account.Priority + } + if left.loadInfo.LoadRate != right.loadInfo.LoadRate { + return left.loadInfo.LoadRate < right.loadInfo.LoadRate + } + if left.loadInfo.WaitingCount != right.loadInfo.WaitingCount { + return left.loadInfo.WaitingCount < right.loadInfo.WaitingCount + } + return left.account.ID < right.account.ID +} + +func 
selectTopKOpenAICandidates(candidates []openAIAccountCandidateScore, topK int) []openAIAccountCandidateScore { + if len(candidates) == 0 { + return nil + } + if topK <= 0 { + topK = 1 + } + if topK >= len(candidates) { + ranked := append([]openAIAccountCandidateScore(nil), candidates...) + sort.Slice(ranked, func(i, j int) bool { + return isOpenAIAccountCandidateBetter(ranked[i], ranked[j]) + }) + return ranked + } + + best := make(openAIAccountCandidateHeap, 0, topK) + for _, candidate := range candidates { + if len(best) < topK { + heap.Push(&best, candidate) + continue + } + if isOpenAIAccountCandidateBetter(candidate, best[0]) { + best[0] = candidate + heap.Fix(&best, 0) + } + } + + ranked := make([]openAIAccountCandidateScore, len(best)) + copy(ranked, best) + sort.Slice(ranked, func(i, j int) bool { + return isOpenAIAccountCandidateBetter(ranked[i], ranked[j]) + }) + return ranked +} + func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( ctx context.Context, req OpenAIAccountScheduleRequest, @@ -329,6 +445,7 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( } filtered := make([]*Account, 0, len(accounts)) + loadReq := make([]AccountWithConcurrency, 0, len(accounts)) for i := range accounts { account := &accounts[i] if req.ExcludedIDs != nil { @@ -343,6 +460,10 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( continue } filtered = append(filtered, account) + loadReq = append(loadReq, AccountWithConcurrency{ + ID: account.ID, + MaxConcurrency: account.Concurrency, + }) } if len(filtered) == 0 { return nil, 0, 0, 0, errors.New("no available OpenAI accounts") @@ -350,13 +471,6 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( loadMap := map[int64]*AccountLoadInfo{} if s.service.concurrencyService != nil { - loadReq := make([]AccountWithConcurrency, 0, len(filtered)) - for _, account := range filtered { - loadReq = append(loadReq, AccountWithConcurrency{ - ID: account.ID, - MaxConcurrency: account.Concurrency, - 
}) - } if batchLoad, loadErr := s.service.concurrencyService.GetAccountsLoadBatch(ctx, loadReq); loadErr == nil { loadMap = batchLoad } @@ -364,8 +478,10 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( minPriority, maxPriority := filtered[0].Priority, filtered[0].Priority maxWaiting := 1 - loadRates := make([]float64, 0, len(filtered)) - ttftSamples := make([]float64, 0, len(filtered)) + loadRateSum := 0.0 + loadRateSumSquares := 0.0 + minTTFT, maxTTFT := 0.0, 0.0 + hasTTFTSample := false candidates := make([]openAIAccountCandidateScore, 0, len(filtered)) for _, account := range filtered { loadInfo := loadMap[account.ID] @@ -383,9 +499,21 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( } errorRate, ttft, hasTTFT := s.stats.snapshot(account.ID) if hasTTFT && ttft > 0 { - ttftSamples = append(ttftSamples, ttft) + if !hasTTFTSample { + minTTFT, maxTTFT = ttft, ttft + hasTTFTSample = true + } else { + if ttft < minTTFT { + minTTFT = ttft + } + if ttft > maxTTFT { + maxTTFT = ttft + } + } } - loadRates = append(loadRates, float64(loadInfo.LoadRate)) + loadRate := float64(loadInfo.LoadRate) + loadRateSum += loadRate + loadRateSumSquares += loadRate * loadRate candidates = append(candidates, openAIAccountCandidateScore{ account: account, loadInfo: loadInfo, @@ -394,19 +522,7 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( hasTTFT: hasTTFT, }) } - - minTTFT, maxTTFT := 0.0, 0.0 - if len(ttftSamples) > 0 { - minTTFT, maxTTFT = ttftSamples[0], ttftSamples[0] - for _, sample := range ttftSamples[1:] { - if sample < minTTFT { - minTTFT = sample - } - if sample > maxTTFT { - maxTTFT = sample - } - } - } + loadSkew := calcLoadSkewByMoments(loadRateSum, loadRateSumSquares, len(candidates)) weights := s.service.openAIWSSchedulerWeights() for i := range candidates { @@ -419,7 +535,7 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( queueFactor := 1 - clamp01(float64(item.loadInfo.WaitingCount)/float64(maxWaiting)) 
errorFactor := 1 - clamp01(item.errorRate) ttftFactor := 0.5 - if item.hasTTFT && maxTTFT > minTTFT { + if item.hasTTFT && hasTTFTSample && maxTTFT > minTTFT { ttftFactor = 1 - clamp01((item.ttft-minTTFT)/(maxTTFT-minTTFT)) } @@ -430,24 +546,6 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( weights.TTFT*ttftFactor } - sort.SliceStable(candidates, func(i, j int) bool { - left := candidates[i] - right := candidates[j] - if left.score != right.score { - return left.score > right.score - } - if left.account.Priority != right.account.Priority { - return left.account.Priority < right.account.Priority - } - if left.loadInfo.LoadRate != right.loadInfo.LoadRate { - return left.loadInfo.LoadRate < right.loadInfo.LoadRate - } - if left.loadInfo.WaitingCount != right.loadInfo.WaitingCount { - return left.loadInfo.WaitingCount < right.loadInfo.WaitingCount - } - return left.account.ID < right.account.ID - }) - topK := s.service.openAIWSLBTopK() if topK > len(candidates) { topK = len(candidates) @@ -455,12 +553,13 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( if topK <= 0 { topK = 1 } + rankedCandidates := selectTopKOpenAICandidates(candidates, topK) for i := 0; i < topK; i++ { - candidate := candidates[i] + candidate := rankedCandidates[i] result, acquireErr := s.service.tryAcquireAccountSlot(ctx, candidate.account.ID, candidate.account.Concurrency) if acquireErr != nil { - return nil, len(candidates), topK, calcLoadSkew(loadRates), acquireErr + return nil, len(candidates), topK, loadSkew, acquireErr } if result != nil && result.Acquired { if req.SessionHash != "" { @@ -470,12 +569,12 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( Account: candidate.account, Acquired: true, ReleaseFunc: result.ReleaseFunc, - }, len(candidates), topK, calcLoadSkew(loadRates), nil + }, len(candidates), topK, loadSkew, nil } } cfg := s.service.schedulingConfig() - candidate := candidates[0] + candidate := rankedCandidates[0] return 
&AccountSelectionResult{ Account: candidate.account, WaitPlan: &AccountWaitPlan{ @@ -484,7 +583,7 @@ func (s *defaultOpenAIAccountScheduler) selectByLoadBalance( Timeout: cfg.FallbackWaitTimeout, MaxWaiting: cfg.FallbackMaxWaiting, }, - }, len(candidates), topK, calcLoadSkew(loadRates), nil + }, len(candidates), topK, loadSkew, nil } func (s *defaultOpenAIAccountScheduler) ReportResult(accountID int64, success bool, firstTokenMs *int) { @@ -647,19 +746,23 @@ func clamp01(value float64) float64 { } func calcLoadSkew(loadRates []float64) float64 { - if len(loadRates) <= 1 { - return 0 - } sum := 0.0 + sumSquares := 0.0 for _, value := range loadRates { sum += value + sumSquares += value * value } - mean := sum / float64(len(loadRates)) - variance := 0.0 - for _, value := range loadRates { - diff := value - mean - variance += diff * diff + return calcLoadSkewByMoments(sum, sumSquares, len(loadRates)) +} + +func calcLoadSkewByMoments(sum float64, sumSquares float64, count int) float64 { + if count <= 1 { + return 0 + } + mean := sum / float64(count) + variance := sumSquares/float64(count) - mean*mean + if variance < 0 { + variance = 0 } - variance /= float64(len(loadRates)) return math.Sqrt(variance) } diff --git a/backend/internal/service/openai_account_scheduler_benchmark_test.go b/backend/internal/service/openai_account_scheduler_benchmark_test.go new file mode 100644 index 000000000..897be5b0e --- /dev/null +++ b/backend/internal/service/openai_account_scheduler_benchmark_test.go @@ -0,0 +1,83 @@ +package service + +import ( + "sort" + "testing" +) + +func buildOpenAISchedulerBenchmarkCandidates(size int) []openAIAccountCandidateScore { + if size <= 0 { + return nil + } + candidates := make([]openAIAccountCandidateScore, 0, size) + for i := 0; i < size; i++ { + accountID := int64(10_000 + i) + candidates = append(candidates, openAIAccountCandidateScore{ + account: &Account{ + ID: accountID, + Priority: i % 7, + }, + loadInfo: &AccountLoadInfo{ + AccountID: 
accountID, + LoadRate: (i * 17) % 100, + WaitingCount: (i * 11) % 13, + }, + score: float64((i*29)%1000) / 100, + errorRate: float64((i * 5) % 100 / 100), + ttft: float64(30 + (i*3)%500), + hasTTFT: i%3 != 0, + }) + } + return candidates +} + +func selectTopKOpenAICandidatesBySortBenchmark(candidates []openAIAccountCandidateScore, topK int) []openAIAccountCandidateScore { + if len(candidates) == 0 { + return nil + } + if topK <= 0 { + topK = 1 + } + ranked := append([]openAIAccountCandidateScore(nil), candidates...) + sort.Slice(ranked, func(i, j int) bool { + return isOpenAIAccountCandidateBetter(ranked[i], ranked[j]) + }) + if topK > len(ranked) { + topK = len(ranked) + } + return ranked[:topK] +} + +func BenchmarkOpenAIAccountSchedulerSelectTopK(b *testing.B) { + cases := []struct { + name string + size int + topK int + }{ + {name: "n_16_k_3", size: 16, topK: 3}, + {name: "n_64_k_3", size: 64, topK: 3}, + {name: "n_256_k_5", size: 256, topK: 5}, + } + + for _, tc := range cases { + candidates := buildOpenAISchedulerBenchmarkCandidates(tc.size) + b.Run(tc.name+"/heap_topk", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + result := selectTopKOpenAICandidates(candidates, tc.topK) + if len(result) == 0 { + b.Fatal("unexpected empty result") + } + } + }) + b.Run(tc.name+"/full_sort", func(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + result := selectTopKOpenAICandidatesBySortBenchmark(candidates, tc.topK) + if len(result) == 0 { + b.Fatal("unexpected empty result") + } + } + }) + } +} diff --git a/backend/internal/service/openai_account_scheduler_test.go b/backend/internal/service/openai_account_scheduler_test.go index 13b74c1bd..9ccf697ea 100644 --- a/backend/internal/service/openai_account_scheduler_test.go +++ b/backend/internal/service/openai_account_scheduler_test.go @@ -2,6 +2,7 @@ package service import ( "context" + "sync" "testing" "time" @@ -273,3 +274,89 @@ func 
TestOpenAIGatewayService_OpenAIAccountSchedulerMetrics(t *testing.T) { func intPtrForTest(v int) *int { return &v } + +func TestOpenAIAccountRuntimeStats_ReportAndSnapshot(t *testing.T) { + stats := newOpenAIAccountRuntimeStats() + stats.report(1001, true, nil) + firstTTFT := 100 + stats.report(1001, false, &firstTTFT) + secondTTFT := 200 + stats.report(1001, false, &secondTTFT) + + errorRate, ttft, hasTTFT := stats.snapshot(1001) + require.True(t, hasTTFT) + require.InDelta(t, 0.36, errorRate, 1e-9) + require.InDelta(t, 120.0, ttft, 1e-9) + require.Equal(t, 1, stats.size()) +} + +func TestOpenAIAccountRuntimeStats_ReportConcurrent(t *testing.T) { + stats := newOpenAIAccountRuntimeStats() + + const ( + accountCount = 4 + workers = 16 + iterations = 800 + ) + var wg sync.WaitGroup + wg.Add(workers) + for worker := 0; worker < workers; worker++ { + worker := worker + go func() { + defer wg.Done() + for i := 0; i < iterations; i++ { + accountID := int64(i%accountCount + 1) + success := (i+worker)%3 != 0 + ttft := 80 + (i+worker)%40 + stats.report(accountID, success, &ttft) + } + }() + } + wg.Wait() + + require.Equal(t, accountCount, stats.size()) + for accountID := int64(1); accountID <= accountCount; accountID++ { + errorRate, ttft, hasTTFT := stats.snapshot(accountID) + require.GreaterOrEqual(t, errorRate, 0.0) + require.LessOrEqual(t, errorRate, 1.0) + require.True(t, hasTTFT) + require.Greater(t, ttft, 0.0) + } +} + +func TestSelectTopKOpenAICandidates(t *testing.T) { + candidates := []openAIAccountCandidateScore{ + { + account: &Account{ID: 11, Priority: 2}, + loadInfo: &AccountLoadInfo{LoadRate: 10, WaitingCount: 1}, + score: 10.0, + }, + { + account: &Account{ID: 12, Priority: 1}, + loadInfo: &AccountLoadInfo{LoadRate: 20, WaitingCount: 1}, + score: 9.5, + }, + { + account: &Account{ID: 13, Priority: 1}, + loadInfo: &AccountLoadInfo{LoadRate: 30, WaitingCount: 0}, + score: 10.0, + }, + { + account: &Account{ID: 14, Priority: 0}, + loadInfo: 
&AccountLoadInfo{LoadRate: 40, WaitingCount: 0}, + score: 8.0, + }, + } + + top2 := selectTopKOpenAICandidates(candidates, 2) + require.Len(t, top2, 2) + require.Equal(t, int64(13), top2[0].account.ID) + require.Equal(t, int64(11), top2[1].account.ID) + + topAll := selectTopKOpenAICandidates(candidates, 8) + require.Len(t, topAll, len(candidates)) + require.Equal(t, int64(13), topAll[0].account.ID) + require.Equal(t, int64(11), topAll[1].account.ID) + require.Equal(t, int64(12), topAll[2].account.ID) + require.Equal(t, int64(14), topAll[3].account.ID) +} diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index ffa2510e5..adddc462b 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -9,6 +9,7 @@ import ( "encoding/json" "errors" "fmt" + "hash/fnv" "io" "math/rand" "net/http" @@ -855,8 +856,9 @@ func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context, body []byte) return "" } - hash := sha256.Sum256([]byte(sessionID)) - return hex.EncodeToString(hash[:]) + h := fnv.New128a() + _, _ = h.Write([]byte(sessionID)) + return hex.EncodeToString(h.Sum(nil)) } // BindStickySession sets session -> account binding with standard TTL. 
@@ -2619,6 +2621,14 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp if !ok { return nil, errors.New("streaming not supported") } + bufferedWriter := bufio.NewWriterSize(w, 4*1024) + flushBuffered := func() error { + if err := bufferedWriter.Flush(); err != nil { + return err + } + flusher.Flush() + return nil + } usage := &OpenAIUsage{} var firstTokenMs *int @@ -2630,38 +2640,6 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp scanBuf := getSSEScannerBuf64K() scanner.Buffer(scanBuf[:0], maxLineSize) - type scanEvent struct { - line string - err error - } - // 独立 goroutine 读取上游,避免读取阻塞影响 keepalive/超时处理 - events := make(chan scanEvent, 16) - done := make(chan struct{}) - sendEvent := func(ev scanEvent) bool { - select { - case events <- ev: - return true - case <-done: - return false - } - } - var lastReadAt int64 - atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) - go func(scanBuf *sseScannerBuf64K) { - defer putSSEScannerBuf64K(scanBuf) - defer close(events) - for scanner.Scan() { - atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) - if !sendEvent(scanEvent{line: scanner.Text()}) { - return - } - } - if err := scanner.Err(); err != nil { - _ = sendEvent(scanEvent{err: err}) - } - }(scanBuf) - defer close(done) - streamInterval := time.Duration(0) if s.cfg != nil && s.cfg.Gateway.StreamDataIntervalTimeout > 0 { streamInterval = time.Duration(s.cfg.Gateway.StreamDataIntervalTimeout) * time.Second @@ -2705,86 +2683,177 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp } errorEventSent = true payload := `{"type":"error","sequence_number":0,"error":{"type":"upstream_error","message":` + strconv.Quote(reason) + `,"code":` + strconv.Quote(reason) + `}}` - _, _ = fmt.Fprintf(w, "data: %s\n\n", payload) - flusher.Flush() + if err := flushBuffered(); err != nil { + clientDisconnected = true + return + } + if _, err := bufferedWriter.WriteString("data: " + payload + 
"\n\n"); err != nil { + clientDisconnected = true + return + } + if err := flushBuffered(); err != nil { + clientDisconnected = true + } } needModelReplace := originalModel != mappedModel - - for { - select { - case ev, ok := <-events: - if !ok { - return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil - } - if ev.err != nil { - // 客户端断开/取消请求时,上游读取往往会返回 context canceled。 - // /v1/responses 的 SSE 事件必须符合 OpenAI 协议;这里不注入自定义 error event,避免下游 SDK 解析失败。 - if errors.Is(ev.err, context.Canceled) || errors.Is(ev.err, context.DeadlineExceeded) { - logger.LegacyPrintf("service.openai_gateway", "Context canceled during streaming, returning collected usage") - return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil - } - // 客户端已断开时,上游出错仅影响体验,不影响计费;返回已收集 usage - if clientDisconnected { - logger.LegacyPrintf("service.openai_gateway", "Upstream read error after client disconnect: %v, returning collected usage", ev.err) - return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil - } - if errors.Is(ev.err, bufio.ErrTooLong) { - logger.LegacyPrintf("service.openai_gateway", "SSE line too long: account=%d max_size=%d error=%v", account.ID, maxLineSize, ev.err) - sendErrorEvent("response_too_large") - return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, ev.err - } - sendErrorEvent("stream_read_error") - return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream read error: %w", ev.err) + resultWithUsage := func() *openaiStreamingResult { + return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs} + } + finalizeStream := func() (*openaiStreamingResult, error) { + if !clientDisconnected { + if err := flushBuffered(); err != nil { + clientDisconnected = true + logger.LegacyPrintf("service.openai_gateway", "Client disconnected during final flush, returning collected usage") } + } + return resultWithUsage(), nil + } + handleScanErr := func(scanErr error) 
(*openaiStreamingResult, error, bool) { + if scanErr == nil { + return nil, nil, false + } + // 客户端断开/取消请求时,上游读取往往会返回 context canceled。 + // /v1/responses 的 SSE 事件必须符合 OpenAI 协议;这里不注入自定义 error event,避免下游 SDK 解析失败。 + if errors.Is(scanErr, context.Canceled) || errors.Is(scanErr, context.DeadlineExceeded) { + logger.LegacyPrintf("service.openai_gateway", "Context canceled during streaming, returning collected usage") + return resultWithUsage(), nil, true + } + // 客户端已断开时,上游出错仅影响体验,不影响计费;返回已收集 usage + if clientDisconnected { + logger.LegacyPrintf("service.openai_gateway", "Upstream read error after client disconnect: %v, returning collected usage", scanErr) + return resultWithUsage(), nil, true + } + if errors.Is(scanErr, bufio.ErrTooLong) { + logger.LegacyPrintf("service.openai_gateway", "SSE line too long: account=%d max_size=%d error=%v", account.ID, maxLineSize, scanErr) + sendErrorEvent("response_too_large") + return resultWithUsage(), scanErr, true + } + sendErrorEvent("stream_read_error") + return resultWithUsage(), fmt.Errorf("stream read error: %w", scanErr), true + } + processSSELine := func(line string, queueDrained bool) { + lastDataAt = time.Now() - line := ev.line - lastDataAt = time.Now() + // Extract data from SSE line (supports both "data: " and "data:" formats) + if data, ok := extractOpenAISSEDataLine(line); ok { - // Extract data from SSE line (supports both "data: " and "data:" formats) - if data, ok := extractOpenAISSEDataLine(line); ok { + // Replace model in response if needed. + // Fast path: most events do not contain model field values. 
+ if needModelReplace && mappedModel != "" && strings.Contains(data, mappedModel) { + line = s.replaceModelInSSELine(line, mappedModel, originalModel) + } - // Replace model in response if needed - if needModelReplace { - line = s.replaceModelInSSELine(line, mappedModel, originalModel) - } + dataBytes := []byte(data) - dataBytes := []byte(data) + // Correct Codex tool calls if needed (apply_patch -> edit, etc.) + if correctedData, corrected := s.toolCorrector.CorrectToolCallsInSSEBytes(dataBytes); corrected { + dataBytes = correctedData + data = string(correctedData) + line = "data: " + data + } - // Correct Codex tool calls if needed (apply_patch -> edit, etc.) - if correctedData, corrected := s.toolCorrector.CorrectToolCallsInSSEBytes(dataBytes); corrected { - dataBytes = correctedData - data = string(correctedData) - line = "data: " + data + // 写入客户端(客户端断开后继续 drain 上游) + if !clientDisconnected { + shouldFlush := queueDrained + if firstTokenMs == nil && data != "" && data != "[DONE]" { + // 保证首个 token 事件尽快出站,避免影响 TTFT。 + shouldFlush = true } - - // 写入客户端(客户端断开后继续 drain 上游) - if !clientDisconnected { - if _, err := fmt.Fprintf(w, "%s\n", line); err != nil { + if _, err := bufferedWriter.WriteString(line); err != nil { + clientDisconnected = true + logger.LegacyPrintf("service.openai_gateway", "Client disconnected during streaming, continuing to drain upstream for billing") + } else if _, err := bufferedWriter.WriteString("\n"); err != nil { + clientDisconnected = true + logger.LegacyPrintf("service.openai_gateway", "Client disconnected during streaming, continuing to drain upstream for billing") + } else if shouldFlush { + if err := flushBuffered(); err != nil { clientDisconnected = true - logger.LegacyPrintf("service.openai_gateway", "Client disconnected during streaming, continuing to drain upstream for billing") - } else { - flusher.Flush() + logger.LegacyPrintf("service.openai_gateway", "Client disconnected during streaming flush, continuing to drain upstream 
for billing") } } + } - // Record first token time - if firstTokenMs == nil && data != "" && data != "[DONE]" { - ms := int(time.Since(startTime).Milliseconds()) - firstTokenMs = &ms - } - s.parseSSEUsageBytes(dataBytes, usage) - } else { - // Forward non-data lines as-is - if !clientDisconnected { - if _, err := fmt.Fprintf(w, "%s\n", line); err != nil { - clientDisconnected = true - logger.LegacyPrintf("service.openai_gateway", "Client disconnected during streaming, continuing to drain upstream for billing") - } else { - flusher.Flush() - } + // Record first token time + if firstTokenMs == nil && data != "" && data != "[DONE]" { + ms := int(time.Since(startTime).Milliseconds()) + firstTokenMs = &ms + } + s.parseSSEUsageBytes(dataBytes, usage) + return + } + + // Forward non-data lines as-is + if !clientDisconnected { + if _, err := bufferedWriter.WriteString(line); err != nil { + clientDisconnected = true + logger.LegacyPrintf("service.openai_gateway", "Client disconnected during streaming, continuing to drain upstream for billing") + } else if _, err := bufferedWriter.WriteString("\n"); err != nil { + clientDisconnected = true + logger.LegacyPrintf("service.openai_gateway", "Client disconnected during streaming, continuing to drain upstream for billing") + } else if queueDrained { + if err := flushBuffered(); err != nil { + clientDisconnected = true + logger.LegacyPrintf("service.openai_gateway", "Client disconnected during streaming flush, continuing to drain upstream for billing") } } + } + } + + // 无超时/无 keepalive 的常见路径走同步扫描,减少 goroutine 与 channel 开销。 + if streamInterval <= 0 && keepaliveInterval <= 0 { + defer putSSEScannerBuf64K(scanBuf) + for scanner.Scan() { + processSSELine(scanner.Text(), true) + } + if result, err, done := handleScanErr(scanner.Err()); done { + return result, err + } + return finalizeStream() + } + + type scanEvent struct { + line string + err error + } + // 独立 goroutine 读取上游,避免读取阻塞影响 keepalive/超时处理 + events := make(chan scanEvent, 16) 
+ done := make(chan struct{}) + sendEvent := func(ev scanEvent) bool { + select { + case events <- ev: + return true + case <-done: + return false + } + } + var lastReadAt int64 + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + go func(scanBuf *sseScannerBuf64K) { + defer putSSEScannerBuf64K(scanBuf) + defer close(events) + for scanner.Scan() { + atomic.StoreInt64(&lastReadAt, time.Now().UnixNano()) + if !sendEvent(scanEvent{line: scanner.Text()}) { + return + } + } + if err := scanner.Err(); err != nil { + _ = sendEvent(scanEvent{err: err}) + } + }(scanBuf) + defer close(done) + + for { + select { + case ev, ok := <-events: + if !ok { + return finalizeStream() + } + if result, err, done := handleScanErr(ev.err); done { + return result, err + } + processSSELine(ev.line, len(events) == 0) case <-intervalCh: lastRead := time.Unix(0, atomic.LoadInt64(&lastReadAt)) @@ -2793,7 +2862,7 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp } if clientDisconnected { logger.LegacyPrintf("service.openai_gateway", "Upstream timeout after client disconnect, returning collected usage") - return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, nil + return resultWithUsage(), nil } logger.LegacyPrintf("service.openai_gateway", "Stream data interval timeout: account=%d model=%s interval=%s", account.ID, originalModel, streamInterval) // 处理流超时,可能标记账户为临时不可调度或错误状态 @@ -2801,7 +2870,7 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp s.rateLimitService.HandleStreamTimeout(ctx, account, originalModel) } sendErrorEvent("stream_timeout") - return &openaiStreamingResult{usage: usage, firstTokenMs: firstTokenMs}, fmt.Errorf("stream data interval timeout") + return resultWithUsage(), fmt.Errorf("stream data interval timeout") case <-keepaliveCh: if clientDisconnected { @@ -2810,12 +2879,15 @@ func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp if time.Since(lastDataAt) < 
keepaliveInterval { continue } - if _, err := fmt.Fprint(w, ":\n\n"); err != nil { + if _, err := bufferedWriter.WriteString(":\n\n"); err != nil { clientDisconnected = true logger.LegacyPrintf("service.openai_gateway", "Client disconnected during streaming, continuing to drain upstream for billing") continue } - flusher.Flush() + if err := flushBuffered(); err != nil { + clientDisconnected = true + logger.LegacyPrintf("service.openai_gateway", "Client disconnected during keepalive flush, continuing to drain upstream for billing") + } } } @@ -2889,7 +2961,7 @@ func (s *OpenAIGatewayService) parseSSEUsageBytes(data []byte, usage *OpenAIUsag return } // 选择性解析:仅在数据中包含 completed 事件标识时才进入字段提取。 - if !bytes.Contains(data, []byte(`"response.completed"`)) { + if len(data) < 80 || !bytes.Contains(data, []byte(`"response.completed"`)) { return } if gjson.GetBytes(data, "type").String() != "response.completed" { diff --git a/backend/internal/service/openai_ws_pool.go b/backend/internal/service/openai_ws_pool.go index 50e114114..647cf48be 100644 --- a/backend/internal/service/openai_ws_pool.go +++ b/backend/internal/service/openai_ws_pool.go @@ -7,6 +7,7 @@ import ( "math" "net/http" "sort" + "strconv" "strings" "sync" "sync/atomic" @@ -20,9 +21,9 @@ const ( openAIWSConnHealthCheckIdle = 90 * time.Second openAIWSConnHealthCheckTO = 2 * time.Second openAIWSConnPrewarmExtraDelay = 2 * time.Second - openAIWSAcquireCleanupInterval = 3 * time.Second openAIWSBackgroundPingInterval = 30 * time.Second openAIWSBackgroundSweepTicker = 30 * time.Second + openAIWSAcquireCleanupInterval = openAIWSBackgroundSweepTicker openAIWSPrewarmFailureWindow = 30 * time.Second openAIWSPrewarmFailureSuppress = 2 @@ -1277,7 +1278,12 @@ func (p *openAIWSConnPool) dialConn(ctx context.Context, req openAIWSAcquireRequ func (p *openAIWSConnPool) nextConnID(accountID int64) string { seq := p.seq.Add(1) - return fmt.Sprintf("oa_ws_%d_%d", accountID, seq) + buf := make([]byte, 0, 32) + buf = append(buf, 
"oa_ws_"...) + buf = strconv.AppendInt(buf, accountID, 10) + buf = append(buf, '_') + buf = strconv.AppendUint(buf, seq, 10) + return string(buf) } func (p *openAIWSConnPool) shouldHealthCheckConn(conn *openAIWSConn) bool { diff --git a/backend/internal/service/openai_ws_pool_test.go b/backend/internal/service/openai_ws_pool_test.go index b612cc0f0..de3394423 100644 --- a/backend/internal/service/openai_ws_pool_test.go +++ b/backend/internal/service/openai_ws_pool_test.go @@ -4,6 +4,7 @@ import ( "context" "errors" "net/http" + "strings" "sync" "testing" "time" @@ -42,6 +43,18 @@ func TestOpenAIWSConnPool_CleanupStaleAndTrimIdle(t *testing.T) { require.NotNil(t, ap.conns["idle_new"], "newer idle should be kept") } +func TestOpenAIWSConnPool_NextConnIDFormat(t *testing.T) { + pool := newOpenAIWSConnPool(&config.Config{}) + id1 := pool.nextConnID(42) + id2 := pool.nextConnID(42) + + require.True(t, strings.HasPrefix(id1, "oa_ws_42_")) + require.True(t, strings.HasPrefix(id2, "oa_ws_42_")) + require.NotEqual(t, id1, id2) + require.Equal(t, "oa_ws_42_1", id1) + require.Equal(t, "oa_ws_42_2", id2) +} + func TestOpenAIWSConnLease_WriteJSONAndGuards(t *testing.T) { conn := newOpenAIWSConn("lease_write", 1, &openAIWSFakeConn{}, nil) lease := &openAIWSConnLease{conn: conn} From 59736ffdb4ba6f3c1684afabe6026d7974044b9d Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 19:20:07 +0800 Subject: [PATCH 025/120] =?UTF-8?q?fix(openai):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E4=BC=9A=E8=AF=9D=E7=B2=98=E6=80=A7=E5=93=88=E5=B8=8C=E4=B8=8E?= =?UTF-8?q?=E8=BF=9E=E6=8E=A5=E6=B1=A0=E6=B8=85=E7=90=86=E5=9B=9E=E5=BD=92?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 恢复 OpenAI 会话哈希为 SHA-256,避免粘性键兼容性回归 - 恢复 WS 获取路径 3 秒清理周期,降低关闭连接占坑窗口 - 在 OpenAI handler 早期校验前恢复请求上下文埋点 - 补充哈希算法与清理间隔回归测试 Co-Authored-By: Claude Opus 4.6 --- .../internal/handler/openai_gateway_handler.go | 2 ++ .../internal/service/openai_gateway_service.go | 6 ++---- 
.../service/openai_gateway_service_test.go | 17 +++++++++++++++++ backend/internal/service/openai_ws_pool.go | 2 +- backend/internal/service/openai_ws_pool_test.go | 5 +++++ 5 files changed, 27 insertions(+), 5 deletions(-) diff --git a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 9e56a3fe0..6caa92721 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -141,6 +141,8 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { return } + setOpsRequestContext(c, "", false, body) + // 校验请求体 JSON 合法性 if !gjson.ValidBytes(body) { h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body") diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index adddc462b..7eb3ee002 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -9,7 +9,6 @@ import ( "encoding/json" "errors" "fmt" - "hash/fnv" "io" "math/rand" "net/http" @@ -856,9 +855,8 @@ func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context, body []byte) return "" } - h := fnv.New128a() - _, _ = h.Write([]byte(sessionID)) - return hex.EncodeToString(h.Sum(nil)) + hash := sha256.Sum256([]byte(sessionID)) + return hex.EncodeToString(hash[:]) } // BindStickySession sets session -> account binding with standard TTL. 
diff --git a/backend/internal/service/openai_gateway_service_test.go b/backend/internal/service/openai_gateway_service_test.go index 226648e40..08958b445 100644 --- a/backend/internal/service/openai_gateway_service_test.go +++ b/backend/internal/service/openai_gateway_service_test.go @@ -4,6 +4,8 @@ import ( "bufio" "bytes" "context" + "crypto/sha256" + "encoding/hex" "errors" "io" "net/http" @@ -166,6 +168,21 @@ func TestOpenAIGatewayService_GenerateSessionHash_Priority(t *testing.T) { } } +func TestOpenAIGatewayService_GenerateSessionHash_UsesSHA256(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + + c.Request.Header.Set("session_id", "sess-fixed-value") + svc := &OpenAIGatewayService{} + + got := svc.GenerateSessionHash(c, nil) + sum := sha256.Sum256([]byte("sess-fixed-value")) + want := hex.EncodeToString(sum[:]) + require.Equal(t, want, got) +} + func (c stubConcurrencyCache) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { if c.waitCounts != nil { if count, ok := c.waitCounts[accountID]; ok { diff --git a/backend/internal/service/openai_ws_pool.go b/backend/internal/service/openai_ws_pool.go index 647cf48be..8fba52481 100644 --- a/backend/internal/service/openai_ws_pool.go +++ b/backend/internal/service/openai_ws_pool.go @@ -21,9 +21,9 @@ const ( openAIWSConnHealthCheckIdle = 90 * time.Second openAIWSConnHealthCheckTO = 2 * time.Second openAIWSConnPrewarmExtraDelay = 2 * time.Second + openAIWSAcquireCleanupInterval = 3 * time.Second openAIWSBackgroundPingInterval = 30 * time.Second openAIWSBackgroundSweepTicker = 30 * time.Second - openAIWSAcquireCleanupInterval = openAIWSBackgroundSweepTicker openAIWSPrewarmFailureWindow = 30 * time.Second openAIWSPrewarmFailureSuppress = 2 diff --git a/backend/internal/service/openai_ws_pool_test.go b/backend/internal/service/openai_ws_pool_test.go index 
de3394423..f0ab646cc 100644 --- a/backend/internal/service/openai_ws_pool_test.go +++ b/backend/internal/service/openai_ws_pool_test.go @@ -55,6 +55,11 @@ func TestOpenAIWSConnPool_NextConnIDFormat(t *testing.T) { require.Equal(t, "oa_ws_42_2", id2) } +func TestOpenAIWSConnPool_AcquireCleanupInterval(t *testing.T) { + require.Equal(t, 3*time.Second, openAIWSAcquireCleanupInterval) + require.Less(t, openAIWSAcquireCleanupInterval, openAIWSBackgroundSweepTicker) +} + func TestOpenAIWSConnLease_WriteJSONAndGuards(t *testing.T) { conn := newOpenAIWSConn("lease_write", 1, &openAIWSFakeConn{}, nil) lease := &openAIWSConnLease{conn: conn} From f134bf6d877d5f463c01b424cd4dd59bb71fcc8c Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 19:48:59 +0800 Subject: [PATCH 026/120] =?UTF-8?q?perf(backend):=20=E6=89=B9=E9=87=8F?= =?UTF-8?q?=E4=BC=98=E5=8C=96=E8=B0=83=E5=BA=A6=E9=93=BE=E8=B7=AF=E4=B8=8E?= =?UTF-8?q?=E7=BD=91=E5=85=B3=E7=83=AD=E8=B7=AF=E5=BE=84=E6=80=A7=E8=83=BD?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/internal/repository/account_repo.go | 41 ++- backend/internal/repository/group_repo.go | 48 ++++ .../internal/repository/ops_repo_dashboard.go | 263 +++++++++++------- backend/internal/repository/usage_log_repo.go | 55 ++++ .../repository/user_group_rate_repo.go | 86 +++++- backend/internal/service/account.go | 71 +++-- backend/internal/service/account_service.go | 48 +++- .../internal/service/account_wildcard_test.go | 47 ++++ backend/internal/service/admin_service.go | 155 ++++++++++- backend/internal/service/gateway_service.go | 252 ++++++++++++----- .../service/gemini_messages_compat_service.go | 43 ++- backend/internal/service/ratelimit_service.go | 216 ++++++++++++++ .../service/scheduler_snapshot_service.go | 75 ++++- 13 files changed, 1168 insertions(+), 232 deletions(-) diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index 
3f77a57e1..cf4671db2 100644 --- a/backend/internal/repository/account_repo.go +++ b/backend/internal/repository/account_repo.go @@ -611,6 +611,43 @@ func (r *accountRepository) syncSchedulerAccountSnapshot(ctx context.Context, ac } } +func (r *accountRepository) syncSchedulerAccountSnapshots(ctx context.Context, accountIDs []int64) { + if r == nil || r.schedulerCache == nil || len(accountIDs) == 0 { + return + } + + uniqueIDs := make([]int64, 0, len(accountIDs)) + seen := make(map[int64]struct{}, len(accountIDs)) + for _, id := range accountIDs { + if id <= 0 { + continue + } + if _, exists := seen[id]; exists { + continue + } + seen[id] = struct{}{} + uniqueIDs = append(uniqueIDs, id) + } + if len(uniqueIDs) == 0 { + return + } + + accounts, err := r.GetByIDs(ctx, uniqueIDs) + if err != nil { + logger.LegacyPrintf("repository.account", "[Scheduler] batch sync account snapshot read failed: count=%d err=%v", len(uniqueIDs), err) + return + } + + for _, account := range accounts { + if account == nil { + continue + } + if err := r.schedulerCache.SetAccount(ctx, account); err != nil { + logger.LegacyPrintf("repository.account", "[Scheduler] batch sync account snapshot write failed: id=%d err=%v", account.ID, err) + } + } +} + func (r *accountRepository) ClearError(ctx context.Context, id int64) error { _, err := r.client.Account.Update(). Where(dbaccount.IDEQ(id)). 
@@ -1197,9 +1234,7 @@ func (r *accountRepository) BulkUpdate(ctx context.Context, ids []int64, updates shouldSync = true } if shouldSync { - for _, id := range ids { - r.syncSchedulerAccountSnapshot(ctx, id) - } + r.syncSchedulerAccountSnapshots(ctx, ids) } } return rows, nil diff --git a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go index fd239996d..1aba210fe 100644 --- a/backend/internal/repository/group_repo.go +++ b/backend/internal/repository/group_repo.go @@ -281,6 +281,54 @@ func (r *groupRepository) ExistsByName(ctx context.Context, name string) (bool, return r.client.Group.Query().Where(group.NameEQ(name)).Exist(ctx) } +// ExistsByIDs 批量检查分组是否存在(仅检查未软删除记录)。 +// 返回结构:map[groupID]exists。 +func (r *groupRepository) ExistsByIDs(ctx context.Context, ids []int64) (map[int64]bool, error) { + result := make(map[int64]bool, len(ids)) + if len(ids) == 0 { + return result, nil + } + + uniqueIDs := make([]int64, 0, len(ids)) + seen := make(map[int64]struct{}, len(ids)) + for _, id := range ids { + if id <= 0 { + continue + } + if _, ok := seen[id]; ok { + continue + } + seen[id] = struct{}{} + uniqueIDs = append(uniqueIDs, id) + result[id] = false + } + if len(uniqueIDs) == 0 { + return result, nil + } + + rows, err := r.sql.QueryContext(ctx, ` + SELECT id + FROM groups + WHERE id = ANY($1) AND deleted_at IS NULL + `, pq.Array(uniqueIDs)) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + for rows.Next() { + var id int64 + if err := rows.Scan(&id); err != nil { + return nil, err + } + result[id] = true + } + if err := rows.Err(); err != nil { + return nil, err + } + return result, nil +} + func (r *groupRepository) GetAccountCount(ctx context.Context, groupID int64) (int64, error) { var count int64 if err := scanSingleRow(ctx, r.sql, "SELECT COUNT(*) FROM account_groups WHERE group_id = $1", []any{groupID}, &count); err != nil { diff --git a/backend/internal/repository/ops_repo_dashboard.go 
b/backend/internal/repository/ops_repo_dashboard.go index 85791a9a6..4246bcd82 100644 --- a/backend/internal/repository/ops_repo_dashboard.go +++ b/backend/internal/repository/ops_repo_dashboard.go @@ -12,6 +12,11 @@ import ( "github.com/Wei-Shaw/sub2api/internal/service" ) +const ( + opsRawLatencyQueryTimeout = 2 * time.Second + opsRawPeakQueryTimeout = 1500 * time.Millisecond +) + func (r *opsRepository) GetDashboardOverview(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsDashboardOverview, error) { if r == nil || r.db == nil { return nil, fmt.Errorf("nil ops repository") @@ -45,15 +50,24 @@ func (r *opsRepository) GetDashboardOverview(ctx context.Context, filter *servic func (r *opsRepository) getDashboardOverviewRaw(ctx context.Context, filter *service.OpsDashboardFilter) (*service.OpsDashboardOverview, error) { start := filter.StartTime.UTC() end := filter.EndTime.UTC() + degraded := false successCount, tokenConsumed, err := r.queryUsageCounts(ctx, filter, start, end) if err != nil { return nil, err } - duration, ttft, err := r.queryUsageLatency(ctx, filter, start, end) + latencyCtx, cancelLatency := context.WithTimeout(ctx, opsRawLatencyQueryTimeout) + duration, ttft, err := r.queryUsageLatency(latencyCtx, filter, start, end) + cancelLatency() if err != nil { - return nil, err + if isQueryTimeoutErr(err) { + degraded = true + duration = service.OpsPercentiles{} + ttft = service.OpsPercentiles{} + } else { + return nil, err + } } errorTotal, businessLimited, errorCountSLA, upstreamExcl, upstream429, upstream529, err := r.queryErrorCounts(ctx, filter, start, end) @@ -75,20 +89,40 @@ func (r *opsRepository) getDashboardOverviewRaw(ctx context.Context, filter *ser qpsCurrent, tpsCurrent, err := r.queryCurrentRates(ctx, filter, end) if err != nil { - return nil, err + if isQueryTimeoutErr(err) { + degraded = true + } else { + return nil, err + } } - qpsPeak, err := r.queryPeakQPS(ctx, filter, start, end) + peakCtx, cancelPeak := 
context.WithTimeout(ctx, opsRawPeakQueryTimeout) + qpsPeak, tpsPeak, err := r.queryPeakRates(peakCtx, filter, start, end) + cancelPeak() if err != nil { - return nil, err - } - tpsPeak, err := r.queryPeakTPS(ctx, filter, start, end) - if err != nil { - return nil, err + if isQueryTimeoutErr(err) { + degraded = true + } else { + return nil, err + } } qpsAvg := roundTo1DP(float64(requestCountTotal) / windowSeconds) tpsAvg := roundTo1DP(float64(tokenConsumed) / windowSeconds) + if degraded { + if qpsCurrent <= 0 { + qpsCurrent = qpsAvg + } + if tpsCurrent <= 0 { + tpsCurrent = tpsAvg + } + if qpsPeak <= 0 { + qpsPeak = roundTo1DP(math.Max(qpsCurrent, qpsAvg)) + } + if tpsPeak <= 0 { + tpsPeak = roundTo1DP(math.Max(tpsCurrent, tpsAvg)) + } + } return &service.OpsDashboardOverview{ StartTime: start, @@ -230,26 +264,45 @@ func (r *opsRepository) getDashboardOverviewPreaggregated(ctx context.Context, f sla := safeDivideFloat64(float64(successCount), float64(requestCountSLA)) errorRate := safeDivideFloat64(float64(errorCountSLA), float64(requestCountSLA)) upstreamErrorRate := safeDivideFloat64(float64(upstreamExcl), float64(requestCountSLA)) + degraded := false // Keep "current" rates as raw, to preserve realtime semantics. qpsCurrent, tpsCurrent, err := r.queryCurrentRates(ctx, filter, end) if err != nil { - return nil, err + if isQueryTimeoutErr(err) { + degraded = true + } else { + return nil, err + } } - // NOTE: peak still uses raw logs (minute granularity). This is typically cheaper than percentile_cont - // and keeps semantics consistent across modes. 
- qpsPeak, err := r.queryPeakQPS(ctx, filter, start, end) + peakCtx, cancelPeak := context.WithTimeout(ctx, opsRawPeakQueryTimeout) + qpsPeak, tpsPeak, err := r.queryPeakRates(peakCtx, filter, start, end) + cancelPeak() if err != nil { - return nil, err - } - tpsPeak, err := r.queryPeakTPS(ctx, filter, start, end) - if err != nil { - return nil, err + if isQueryTimeoutErr(err) { + degraded = true + } else { + return nil, err + } } qpsAvg := roundTo1DP(float64(requestCountTotal) / windowSeconds) tpsAvg := roundTo1DP(float64(tokenConsumed) / windowSeconds) + if degraded { + if qpsCurrent <= 0 { + qpsCurrent = qpsAvg + } + if tpsCurrent <= 0 { + tpsCurrent = tpsAvg + } + if qpsPeak <= 0 { + qpsPeak = roundTo1DP(math.Max(qpsCurrent, qpsAvg)) + } + if tpsPeak <= 0 { + tpsPeak = roundTo1DP(math.Max(tpsCurrent, tpsAvg)) + } + } return &service.OpsDashboardOverview{ StartTime: start, @@ -577,9 +630,16 @@ func (r *opsRepository) queryRawPartial(ctx context.Context, filter *service.Ops return nil, err } - duration, ttft, err := r.queryUsageLatency(ctx, filter, start, end) + latencyCtx, cancelLatency := context.WithTimeout(ctx, opsRawLatencyQueryTimeout) + duration, ttft, err := r.queryUsageLatency(latencyCtx, filter, start, end) + cancelLatency() if err != nil { - return nil, err + if isQueryTimeoutErr(err) { + duration = service.OpsPercentiles{} + ttft = service.OpsPercentiles{} + } else { + return nil, err + } } errorTotal, businessLimited, errorCountSLA, upstreamExcl, upstream429, upstream529, err := r.queryErrorCounts(ctx, filter, start, end) @@ -735,68 +795,56 @@ FROM usage_logs ul } func (r *opsRepository) queryUsageLatency(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (duration service.OpsPercentiles, ttft service.OpsPercentiles, err error) { - { - join, where, args, _ := buildUsageWhere(filter, start, end, 1) - q := ` + join, where, args, _ := buildUsageWhere(filter, start, end, 1) + q := ` SELECT - percentile_cont(0.50) WITHIN GROUP 
(ORDER BY duration_ms) AS p50, - percentile_cont(0.90) WITHIN GROUP (ORDER BY duration_ms) AS p90, - percentile_cont(0.95) WITHIN GROUP (ORDER BY duration_ms) AS p95, - percentile_cont(0.99) WITHIN GROUP (ORDER BY duration_ms) AS p99, - AVG(duration_ms) AS avg_ms, - MAX(duration_ms) AS max_ms + percentile_cont(0.50) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p50, + percentile_cont(0.90) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p90, + percentile_cont(0.95) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p95, + percentile_cont(0.99) WITHIN GROUP (ORDER BY duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_p99, + AVG(duration_ms) FILTER (WHERE duration_ms IS NOT NULL) AS duration_avg, + MAX(duration_ms) AS duration_max, + percentile_cont(0.50) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p50, + percentile_cont(0.90) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p90, + percentile_cont(0.95) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p95, + percentile_cont(0.99) WITHIN GROUP (ORDER BY first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_p99, + AVG(first_token_ms) FILTER (WHERE first_token_ms IS NOT NULL) AS ttft_avg, + MAX(first_token_ms) AS ttft_max FROM usage_logs ul ` + join + ` -` + where + ` -AND duration_ms IS NOT NULL` +` + where - var p50, p90, p95, p99 sql.NullFloat64 - var avg sql.NullFloat64 - var max sql.NullInt64 - if err := r.db.QueryRowContext(ctx, q, args...).Scan(&p50, &p90, &p95, &p99, &avg, &max); err != nil { - return service.OpsPercentiles{}, service.OpsPercentiles{}, err - } - duration.P50 = floatToIntPtr(p50) - duration.P90 = floatToIntPtr(p90) - duration.P95 = floatToIntPtr(p95) - duration.P99 = floatToIntPtr(p99) - duration.Avg = floatToIntPtr(avg) - if max.Valid 
{ - v := int(max.Int64) - duration.Max = &v - } + var dP50, dP90, dP95, dP99 sql.NullFloat64 + var dAvg sql.NullFloat64 + var dMax sql.NullInt64 + var tP50, tP90, tP95, tP99 sql.NullFloat64 + var tAvg sql.NullFloat64 + var tMax sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan( + &dP50, &dP90, &dP95, &dP99, &dAvg, &dMax, + &tP50, &tP90, &tP95, &tP99, &tAvg, &tMax, + ); err != nil { + return service.OpsPercentiles{}, service.OpsPercentiles{}, err } - { - join, where, args, _ := buildUsageWhere(filter, start, end, 1) - q := ` -SELECT - percentile_cont(0.50) WITHIN GROUP (ORDER BY first_token_ms) AS p50, - percentile_cont(0.90) WITHIN GROUP (ORDER BY first_token_ms) AS p90, - percentile_cont(0.95) WITHIN GROUP (ORDER BY first_token_ms) AS p95, - percentile_cont(0.99) WITHIN GROUP (ORDER BY first_token_ms) AS p99, - AVG(first_token_ms) AS avg_ms, - MAX(first_token_ms) AS max_ms -FROM usage_logs ul -` + join + ` -` + where + ` -AND first_token_ms IS NOT NULL` + duration.P50 = floatToIntPtr(dP50) + duration.P90 = floatToIntPtr(dP90) + duration.P95 = floatToIntPtr(dP95) + duration.P99 = floatToIntPtr(dP99) + duration.Avg = floatToIntPtr(dAvg) + if dMax.Valid { + v := int(dMax.Int64) + duration.Max = &v + } - var p50, p90, p95, p99 sql.NullFloat64 - var avg sql.NullFloat64 - var max sql.NullInt64 - if err := r.db.QueryRowContext(ctx, q, args...).Scan(&p50, &p90, &p95, &p99, &avg, &max); err != nil { - return service.OpsPercentiles{}, service.OpsPercentiles{}, err - } - ttft.P50 = floatToIntPtr(p50) - ttft.P90 = floatToIntPtr(p90) - ttft.P95 = floatToIntPtr(p95) - ttft.P99 = floatToIntPtr(p99) - ttft.Avg = floatToIntPtr(avg) - if max.Valid { - v := int(max.Int64) - ttft.Max = &v - } + ttft.P50 = floatToIntPtr(tP50) + ttft.P90 = floatToIntPtr(tP90) + ttft.P95 = floatToIntPtr(tP95) + ttft.P99 = floatToIntPtr(tP99) + ttft.Avg = floatToIntPtr(tAvg) + if tMax.Valid { + v := int(tMax.Int64) + ttft.Max = &v } return duration, ttft, nil @@ -855,19 +903,32 @@ func 
(r *opsRepository) queryCurrentRates(ctx context.Context, filter *service.O } func (r *opsRepository) queryPeakQPS(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (float64, error) { + qpsPeak, _, err := r.queryPeakRates(ctx, filter, start, end) + return qpsPeak, err +} + +func (r *opsRepository) queryPeakTPS(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (float64, error) { + _, tpsPeak, err := r.queryPeakRates(ctx, filter, start, end) + return tpsPeak, err +} + +func (r *opsRepository) queryPeakRates(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (qpsPeak float64, tpsPeak float64, err error) { usageJoin, usageWhere, usageArgs, next := buildUsageWhere(filter, start, end, 1) errorWhere, errorArgs, _ := buildErrorWhere(filter, start, end, next) q := ` WITH usage_buckets AS ( - SELECT date_trunc('minute', ul.created_at) AS bucket, COUNT(*) AS cnt + SELECT + date_trunc('minute', ul.created_at) AS bucket, + COUNT(*) AS req_cnt, + COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS token_cnt FROM usage_logs ul ` + usageJoin + ` ` + usageWhere + ` GROUP BY 1 ), error_buckets AS ( - SELECT date_trunc('minute', created_at) AS bucket, COUNT(*) AS cnt + SELECT date_trunc('minute', created_at) AS bucket, COUNT(*) AS err_cnt FROM ops_error_logs ` + errorWhere + ` AND COALESCE(status_code, 0) >= 400 @@ -875,47 +936,33 @@ error_buckets AS ( ), combined AS ( SELECT COALESCE(u.bucket, e.bucket) AS bucket, - COALESCE(u.cnt, 0) + COALESCE(e.cnt, 0) AS total + COALESCE(u.req_cnt, 0) + COALESCE(e.err_cnt, 0) AS total_req, + COALESCE(u.token_cnt, 0) AS total_tokens FROM usage_buckets u FULL OUTER JOIN error_buckets e ON u.bucket = e.bucket ) -SELECT COALESCE(MAX(total), 0) FROM combined` +SELECT + COALESCE(MAX(total_req), 0) AS max_req_per_min, + COALESCE(MAX(total_tokens), 0) AS max_tokens_per_min +FROM combined` args := append(usageArgs, errorArgs...) 
- var maxPerMinute sql.NullInt64 - if err := r.db.QueryRowContext(ctx, q, args...).Scan(&maxPerMinute); err != nil { - return 0, err + var maxReqPerMinute, maxTokensPerMinute sql.NullInt64 + if err := r.db.QueryRowContext(ctx, q, args...).Scan(&maxReqPerMinute, &maxTokensPerMinute); err != nil { + return 0, 0, err + } + if maxReqPerMinute.Valid && maxReqPerMinute.Int64 > 0 { + qpsPeak = roundTo1DP(float64(maxReqPerMinute.Int64) / 60.0) } - if !maxPerMinute.Valid || maxPerMinute.Int64 <= 0 { - return 0, nil + if maxTokensPerMinute.Valid && maxTokensPerMinute.Int64 > 0 { + tpsPeak = roundTo1DP(float64(maxTokensPerMinute.Int64) / 60.0) } - return roundTo1DP(float64(maxPerMinute.Int64) / 60.0), nil + return qpsPeak, tpsPeak, nil } -func (r *opsRepository) queryPeakTPS(ctx context.Context, filter *service.OpsDashboardFilter, start, end time.Time) (float64, error) { - join, where, args, _ := buildUsageWhere(filter, start, end, 1) - - q := ` -SELECT COALESCE(MAX(tokens_per_min), 0) -FROM ( - SELECT - date_trunc('minute', ul.created_at) AS bucket, - COALESCE(SUM(input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens), 0) AS tokens_per_min - FROM usage_logs ul - ` + join + ` - ` + where + ` - GROUP BY 1 -) t` - - var maxPerMinute sql.NullInt64 - if err := r.db.QueryRowContext(ctx, q, args...).Scan(&maxPerMinute); err != nil { - return 0, err - } - if !maxPerMinute.Valid || maxPerMinute.Int64 <= 0 { - return 0, nil - } - return roundTo1DP(float64(maxPerMinute.Int64) / 60.0), nil +func isQueryTimeoutErr(err error) bool { + return errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) } func buildUsageWhere(filter *service.OpsDashboardFilter, start, end time.Time, startIndex int) (join string, where string, args []any, nextIndex int) { diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go index 2b4e061a7..01dc69b55 100644 --- a/backend/internal/repository/usage_log_repo.go +++ 
b/backend/internal/repository/usage_log_repo.go @@ -974,6 +974,61 @@ func (r *usageLogRepository) GetAccountWindowStatsBatch(ctx context.Context, acc return result, nil } +// GetGeminiUsageTotalsBatch 批量聚合 Gemini 账号在窗口内的 Pro/Flash 请求与用量。 +// 模型分类规则与 service.geminiModelClassFromName 一致:model 包含 flash/lite 视为 flash,其余视为 pro。 +func (r *usageLogRepository) GetGeminiUsageTotalsBatch(ctx context.Context, accountIDs []int64, startTime, endTime time.Time) (map[int64]service.GeminiUsageTotals, error) { + result := make(map[int64]service.GeminiUsageTotals, len(accountIDs)) + if len(accountIDs) == 0 { + return result, nil + } + + query := ` + SELECT + account_id, + COALESCE(SUM(CASE WHEN LOWER(COALESCE(model, '')) LIKE '%flash%' OR LOWER(COALESCE(model, '')) LIKE '%lite%' THEN 1 ELSE 0 END), 0) AS flash_requests, + COALESCE(SUM(CASE WHEN LOWER(COALESCE(model, '')) LIKE '%flash%' OR LOWER(COALESCE(model, '')) LIKE '%lite%' THEN 0 ELSE 1 END), 0) AS pro_requests, + COALESCE(SUM(CASE WHEN LOWER(COALESCE(model, '')) LIKE '%flash%' OR LOWER(COALESCE(model, '')) LIKE '%lite%' THEN (input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) ELSE 0 END), 0) AS flash_tokens, + COALESCE(SUM(CASE WHEN LOWER(COALESCE(model, '')) LIKE '%flash%' OR LOWER(COALESCE(model, '')) LIKE '%lite%' THEN 0 ELSE (input_tokens + output_tokens + cache_creation_tokens + cache_read_tokens) END), 0) AS pro_tokens, + COALESCE(SUM(CASE WHEN LOWER(COALESCE(model, '')) LIKE '%flash%' OR LOWER(COALESCE(model, '')) LIKE '%lite%' THEN actual_cost ELSE 0 END), 0) AS flash_cost, + COALESCE(SUM(CASE WHEN LOWER(COALESCE(model, '')) LIKE '%flash%' OR LOWER(COALESCE(model, '')) LIKE '%lite%' THEN 0 ELSE actual_cost END), 0) AS pro_cost + FROM usage_logs + WHERE account_id = ANY($1) AND created_at >= $2 AND created_at < $3 + GROUP BY account_id + ` + rows, err := r.sql.QueryContext(ctx, query, pq.Array(accountIDs), startTime, endTime) + if err != nil { + return nil, err + } + defer func() { _ = 
rows.Close() }() + + for rows.Next() { + var accountID int64 + var totals service.GeminiUsageTotals + if err := rows.Scan( + &accountID, + &totals.FlashRequests, + &totals.ProRequests, + &totals.FlashTokens, + &totals.ProTokens, + &totals.FlashCost, + &totals.ProCost, + ); err != nil { + return nil, err + } + result[accountID] = totals + } + if err := rows.Err(); err != nil { + return nil, err + } + + for _, accountID := range accountIDs { + if _, ok := result[accountID]; !ok { + result[accountID] = service.GeminiUsageTotals{} + } + } + return result, nil +} + // TrendDataPoint represents a single point in trend data type TrendDataPoint = usagestats.TrendDataPoint diff --git a/backend/internal/repository/user_group_rate_repo.go b/backend/internal/repository/user_group_rate_repo.go index eb65403b2..e3b110968 100644 --- a/backend/internal/repository/user_group_rate_repo.go +++ b/backend/internal/repository/user_group_rate_repo.go @@ -6,6 +6,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/lib/pq" ) type userGroupRateRepository struct { @@ -41,6 +42,59 @@ func (r *userGroupRateRepository) GetByUserID(ctx context.Context, userID int64) return result, nil } +// GetByUserIDs 批量获取多个用户的专属分组倍率。 +// 返回结构:map[userID]map[groupID]rate +func (r *userGroupRateRepository) GetByUserIDs(ctx context.Context, userIDs []int64) (map[int64]map[int64]float64, error) { + result := make(map[int64]map[int64]float64, len(userIDs)) + if len(userIDs) == 0 { + return result, nil + } + + uniqueIDs := make([]int64, 0, len(userIDs)) + seen := make(map[int64]struct{}, len(userIDs)) + for _, userID := range userIDs { + if userID <= 0 { + continue + } + if _, exists := seen[userID]; exists { + continue + } + seen[userID] = struct{}{} + uniqueIDs = append(uniqueIDs, userID) + result[userID] = make(map[int64]float64) + } + if len(uniqueIDs) == 0 { + return result, nil + } + + rows, err := r.sql.QueryContext(ctx, ` + SELECT user_id, group_id, rate_multiplier + FROM 
user_group_rate_multipliers + WHERE user_id = ANY($1) + `, pq.Array(uniqueIDs)) + if err != nil { + return nil, err + } + defer func() { _ = rows.Close() }() + + for rows.Next() { + var userID int64 + var groupID int64 + var rate float64 + if err := rows.Scan(&userID, &groupID, &rate); err != nil { + return nil, err + } + if _, ok := result[userID]; !ok { + result[userID] = make(map[int64]float64) + } + result[userID][groupID] = rate + } + if err := rows.Err(); err != nil { + return nil, err + } + return result, nil +} + // GetByUserAndGroup 获取用户在特定分组的专属倍率 func (r *userGroupRateRepository) GetByUserAndGroup(ctx context.Context, userID, groupID int64) (*float64, error) { query := `SELECT rate_multiplier FROM user_group_rate_multipliers WHERE user_id = $1 AND group_id = $2` @@ -65,33 +119,43 @@ func (r *userGroupRateRepository) SyncUserGroupRates(ctx context.Context, userID // 分离需要删除和需要 upsert 的记录 var toDelete []int64 - toUpsert := make(map[int64]float64) + upsertGroupIDs := make([]int64, 0, len(rates)) + upsertRates := make([]float64, 0, len(rates)) for groupID, rate := range rates { if rate == nil { toDelete = append(toDelete, groupID) } else { - toUpsert[groupID] = *rate + upsertGroupIDs = append(upsertGroupIDs, groupID) + upsertRates = append(upsertRates, *rate) } } // 删除指定的记录 - for _, groupID := range toDelete { - _, err := r.sql.ExecContext(ctx, - `DELETE FROM user_group_rate_multipliers WHERE user_id = $1 AND group_id = $2`, - userID, groupID) - if err != nil { + if len(toDelete) > 0 { + if _, err := r.sql.ExecContext(ctx, + `DELETE FROM user_group_rate_multipliers WHERE user_id = $1 AND group_id = ANY($2)`, + userID, pq.Array(toDelete)); err != nil { return err } } // Upsert 记录 now := time.Now() - for groupID, rate := range toUpsert { + if len(upsertGroupIDs) > 0 { _, err := r.sql.ExecContext(ctx, ` INSERT INTO user_group_rate_multipliers (user_id, group_id, rate_multiplier, created_at, updated_at) - VALUES ($1, $2, $3, $4, $4) - ON CONFLICT (user_id, 
group_id) DO UPDATE SET rate_multiplier = $3, updated_at = $4 - `, userID, groupID, rate, now) + SELECT + $1::bigint, + data.group_id, + data.rate_multiplier, + $2::timestamptz, + $2::timestamptz + FROM unnest($3::bigint[], $4::double precision[]) AS data(group_id, rate_multiplier) + ON CONFLICT (user_id, group_id) + DO UPDATE SET + rate_multiplier = EXCLUDED.rate_multiplier, + updated_at = EXCLUDED.updated_at + `, userID, now, pq.Array(upsertGroupIDs), pq.Array(upsertRates)) if err != nil { return err } diff --git a/backend/internal/service/account.go b/backend/internal/service/account.go index 84483e51f..2ad2c5dbc 100644 --- a/backend/internal/service/account.go +++ b/backend/internal/service/account.go @@ -3,6 +3,7 @@ package service import ( "encoding/json" + "reflect" "sort" "strconv" "strings" @@ -50,6 +51,13 @@ type Account struct { AccountGroups []AccountGroup GroupIDs []int64 Groups []*Group + + // model_mapping 热路径缓存(非持久化字段) + modelMappingCache map[string]string + modelMappingCacheReady bool + modelMappingCacheCredentialsPtr uintptr + modelMappingCacheRawPtr uintptr + modelMappingCacheRawLen int } type TempUnschedulableRule struct { @@ -349,6 +357,29 @@ func parseTempUnschedInt(value any) int { } func (a *Account) GetModelMapping() map[string]string { + credentialsPtr := mapPtr(a.Credentials) + rawMapping, _ := a.Credentials["model_mapping"].(map[string]any) + rawPtr := mapPtr(rawMapping) + rawLen := len(rawMapping) + + if a.modelMappingCacheReady && + a.modelMappingCacheCredentialsPtr == credentialsPtr && + a.modelMappingCacheRawPtr == rawPtr && + a.modelMappingCacheRawLen == rawLen { + return a.modelMappingCache + } + + mapping := a.resolveModelMapping(rawMapping) + + a.modelMappingCache = mapping + a.modelMappingCacheReady = true + a.modelMappingCacheCredentialsPtr = credentialsPtr + a.modelMappingCacheRawPtr = rawPtr + a.modelMappingCacheRawLen = rawLen + return mapping +} + +func (a *Account) resolveModelMapping(rawMapping map[string]any) 
map[string]string { if a.Credentials == nil { // Antigravity 平台使用默认映射 if a.Platform == domain.PlatformAntigravity { @@ -356,32 +387,31 @@ func (a *Account) GetModelMapping() map[string]string { } return nil } - raw, ok := a.Credentials["model_mapping"] - if !ok || raw == nil { + if len(rawMapping) == 0 { // Antigravity 平台使用默认映射 if a.Platform == domain.PlatformAntigravity { return domain.DefaultAntigravityModelMapping } return nil } - if m, ok := raw.(map[string]any); ok { - result := make(map[string]string) - for k, v := range m { - if s, ok := v.(string); ok { - result[k] = s - } + + result := make(map[string]string) + for k, v := range rawMapping { + if s, ok := v.(string); ok { + result[k] = s } - if len(result) > 0 { - if a.Platform == domain.PlatformAntigravity { - ensureAntigravityDefaultPassthroughs(result, []string{ - "gemini-3-flash", - "gemini-3.1-pro-high", - "gemini-3.1-pro-low", - }) - } - return result + } + if len(result) > 0 { + if a.Platform == domain.PlatformAntigravity { + ensureAntigravityDefaultPassthroughs(result, []string{ + "gemini-3-flash", + "gemini-3.1-pro-high", + "gemini-3.1-pro-low", + }) } + return result } + // Antigravity 平台使用默认映射 if a.Platform == domain.PlatformAntigravity { return domain.DefaultAntigravityModelMapping @@ -389,6 +419,13 @@ func (a *Account) GetModelMapping() map[string]string { return nil } +func mapPtr(m map[string]any) uintptr { + if m == nil { + return 0 + } + return reflect.ValueOf(m).Pointer() +} + func ensureAntigravityDefaultPassthrough(mapping map[string]string, model string) { if mapping == nil || model == "" { return diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go index b301049f1..be18cdd19 100644 --- a/backend/internal/service/account_service.go +++ b/backend/internal/service/account_service.go @@ -119,6 +119,10 @@ type AccountService struct { groupRepo GroupRepository } +type groupExistenceBatchChecker interface { + ExistsByIDs(ctx context.Context, 
ids []int64) (map[int64]bool, error) +} + // NewAccountService 创建账号服务实例 func NewAccountService(accountRepo AccountRepository, groupRepo GroupRepository) *AccountService { return &AccountService{ @@ -131,11 +135,8 @@ func NewAccountService(accountRepo AccountRepository, groupRepo GroupRepository) func (s *AccountService) Create(ctx context.Context, req CreateAccountRequest) (*Account, error) { // 验证分组是否存在(如果指定了分组) if len(req.GroupIDs) > 0 { - for _, groupID := range req.GroupIDs { - _, err := s.groupRepo.GetByID(ctx, groupID) - if err != nil { - return nil, fmt.Errorf("get group: %w", err) - } + if err := s.validateGroupIDsExist(ctx, req.GroupIDs); err != nil { + return nil, err } } @@ -256,11 +257,8 @@ func (s *AccountService) Update(ctx context.Context, id int64, req UpdateAccount // 先验证分组是否存在(在任何写操作之前) if req.GroupIDs != nil { - for _, groupID := range *req.GroupIDs { - _, err := s.groupRepo.GetByID(ctx, groupID) - if err != nil { - return nil, fmt.Errorf("get group: %w", err) - } + if err := s.validateGroupIDsExist(ctx, *req.GroupIDs); err != nil { + return nil, err } } @@ -300,6 +298,36 @@ func (s *AccountService) Delete(ctx context.Context, id int64) error { return nil } +func (s *AccountService) validateGroupIDsExist(ctx context.Context, groupIDs []int64) error { + if len(groupIDs) == 0 { + return nil + } + + if batchChecker, ok := s.groupRepo.(groupExistenceBatchChecker); ok { + existsByID, err := batchChecker.ExistsByIDs(ctx, groupIDs) + if err != nil { + return fmt.Errorf("check groups exists: %w", err) + } + for _, groupID := range groupIDs { + if groupID <= 0 { + return fmt.Errorf("get group: %w", ErrGroupNotFound) + } + if !existsByID[groupID] { + return fmt.Errorf("get group: %w", ErrGroupNotFound) + } + } + return nil + } + + for _, groupID := range groupIDs { + _, err := s.groupRepo.GetByID(ctx, groupID) + if err != nil { + return fmt.Errorf("get group: %w", err) + } + } + return nil +} + // UpdateStatus 更新账号状态 func (s *AccountService) 
UpdateStatus(ctx context.Context, id int64, status string, errorMessage string) error { account, err := s.accountRepo.GetByID(ctx, id) diff --git a/backend/internal/service/account_wildcard_test.go b/backend/internal/service/account_wildcard_test.go index 6a9acc681..86239e6e4 100644 --- a/backend/internal/service/account_wildcard_test.go +++ b/backend/internal/service/account_wildcard_test.go @@ -314,3 +314,50 @@ func TestAccountGetModelMapping_AntigravityRespectsWildcardOverride(t *testing.T t.Fatalf("expected wildcard mapping to stay effective, got: %q", mapped) } } + +func TestAccountGetModelMapping_CacheInvalidatesOnCredentialsReplace(t *testing.T) { + account := &Account{ + Credentials: map[string]any{ + "model_mapping": map[string]any{ + "claude-3-5-sonnet": "upstream-a", + }, + }, + } + + first := account.GetModelMapping() + if first["claude-3-5-sonnet"] != "upstream-a" { + t.Fatalf("unexpected first mapping: %v", first) + } + + account.Credentials = map[string]any{ + "model_mapping": map[string]any{ + "claude-3-5-sonnet": "upstream-b", + }, + } + second := account.GetModelMapping() + if second["claude-3-5-sonnet"] != "upstream-b" { + t.Fatalf("expected cache invalidated after credentials replace, got: %v", second) + } +} + +func TestAccountGetModelMapping_CacheInvalidatesOnMappingLenChange(t *testing.T) { + rawMapping := map[string]any{ + "claude-sonnet": "sonnet-a", + } + account := &Account{ + Credentials: map[string]any{ + "model_mapping": rawMapping, + }, + } + + first := account.GetModelMapping() + if len(first) != 1 { + t.Fatalf("unexpected first mapping length: %d", len(first)) + } + + rawMapping["claude-opus"] = "opus-b" + second := account.GetModelMapping() + if second["claude-opus"] != "opus-b" { + t.Fatalf("expected cache invalidated after mapping len change, got: %v", second) + } +} diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index 473396611..bde64c28a 100644 --- 
a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -402,6 +402,14 @@ type adminServiceImpl struct { authCacheInvalidator APIKeyAuthCacheInvalidator } +type userGroupRateBatchReader interface { + GetByUserIDs(ctx context.Context, userIDs []int64) (map[int64]map[int64]float64, error) +} + +type groupExistenceBatchReader interface { + ExistsByIDs(ctx context.Context, ids []int64) (map[int64]bool, error) +} + // NewAdminService creates a new AdminService func NewAdminService( userRepo UserRepository, @@ -442,13 +450,30 @@ func (s *adminServiceImpl) ListUsers(ctx context.Context, page, pageSize int, fi } // 批量加载用户专属分组倍率 if s.userGroupRateRepo != nil && len(users) > 0 { - for i := range users { - rates, err := s.userGroupRateRepo.GetByUserID(ctx, users[i].ID) + if batchRepo, ok := s.userGroupRateRepo.(userGroupRateBatchReader); ok { + userIDs := make([]int64, 0, len(users)) + for i := range users { + userIDs = append(userIDs, users[i].ID) + } + ratesByUser, err := batchRepo.GetByUserIDs(ctx, userIDs) if err != nil { - logger.LegacyPrintf("service.admin", "failed to load user group rates: user_id=%d err=%v", users[i].ID, err) - continue + logger.LegacyPrintf("service.admin", "failed to load user group rates in batch: err=%v", err) + } else { + for i := range users { + if rates, ok := ratesByUser[users[i].ID]; ok { + users[i].GroupRates = rates + } + } + } + } else { + for i := range users { + rates, err := s.userGroupRateRepo.GetByUserID(ctx, users[i].ID) + if err != nil { + logger.LegacyPrintf("service.admin", "failed to load user group rates: user_id=%d err=%v", users[i].ID, err) + continue + } + users[i].GroupRates = rates } - users[i].GroupRates = rates } } return users, result.Total, nil @@ -1303,10 +1328,8 @@ func (s *adminServiceImpl) UpdateAccount(ctx context.Context, id int64, input *U // 先验证分组是否存在(在任何写操作之前) if input.GroupIDs != nil { - for _, groupID := range *input.GroupIDs { - if _, err := s.groupRepo.GetByID(ctx, 
groupID); err != nil { - return nil, fmt.Errorf("get group: %w", err) - } + if err := s.validateGroupIDsExist(ctx, *input.GroupIDs); err != nil { + return nil, err } // 检查混合渠道风险(除非用户已确认) @@ -1348,11 +1371,18 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp if len(input.AccountIDs) == 0 { return result, nil } + if input.GroupIDs != nil { + if err := s.validateGroupIDsExist(ctx, *input.GroupIDs); err != nil { + return nil, err + } + } needMixedChannelCheck := input.GroupIDs != nil && !input.SkipMixedChannelCheck // 预加载账号平台信息(混合渠道检查或 Sora 同步需要)。 platformByID := map[int64]string{} + groupAccountsByID := map[int64][]Account{} + groupNameByID := map[int64]string{} if needMixedChannelCheck { accounts, err := s.accountRepo.GetByIDs(ctx, input.AccountIDs) if err != nil { @@ -1366,6 +1396,13 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp } } } + + loadedAccounts, loadedNames, err := s.preloadMixedChannelRiskData(ctx, *input.GroupIDs) + if err != nil { + return nil, err + } + groupAccountsByID = loadedAccounts + groupNameByID = loadedNames } if input.RateMultiplier != nil { @@ -1426,7 +1463,7 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp } platform = account.Platform } - if err := s.checkMixedChannelRisk(ctx, accountID, platform, *input.GroupIDs); err != nil { + if err := s.checkMixedChannelRiskWithPreloaded(accountID, platform, *input.GroupIDs, groupAccountsByID, groupNameByID); err != nil { entry.Success = false entry.Error = err.Error() result.Failed++ @@ -2115,6 +2152,104 @@ func (s *adminServiceImpl) checkMixedChannelRisk(ctx context.Context, currentAcc return nil } +func (s *adminServiceImpl) preloadMixedChannelRiskData(ctx context.Context, groupIDs []int64) (map[int64][]Account, map[int64]string, error) { + accountsByGroup := make(map[int64][]Account) + groupNameByID := make(map[int64]string) + if len(groupIDs) == 0 { + return accountsByGroup, groupNameByID, 
nil + } + + seen := make(map[int64]struct{}, len(groupIDs)) + for _, groupID := range groupIDs { + if groupID <= 0 { + continue + } + if _, ok := seen[groupID]; ok { + continue + } + seen[groupID] = struct{}{} + + accounts, err := s.accountRepo.ListByGroup(ctx, groupID) + if err != nil { + return nil, nil, fmt.Errorf("get accounts in group %d: %w", groupID, err) + } + accountsByGroup[groupID] = accounts + + group, err := s.groupRepo.GetByID(ctx, groupID) + if err != nil { + continue + } + if group != nil { + groupNameByID[groupID] = group.Name + } + } + + return accountsByGroup, groupNameByID, nil +} + +func (s *adminServiceImpl) validateGroupIDsExist(ctx context.Context, groupIDs []int64) error { + if len(groupIDs) == 0 { + return nil + } + + if batchReader, ok := s.groupRepo.(groupExistenceBatchReader); ok { + existsByID, err := batchReader.ExistsByIDs(ctx, groupIDs) + if err != nil { + return fmt.Errorf("check groups exists: %w", err) + } + for _, groupID := range groupIDs { + if groupID <= 0 || !existsByID[groupID] { + return fmt.Errorf("get group: %w", ErrGroupNotFound) + } + } + return nil + } + + for _, groupID := range groupIDs { + if _, err := s.groupRepo.GetByID(ctx, groupID); err != nil { + return fmt.Errorf("get group: %w", err) + } + } + return nil +} + +func (s *adminServiceImpl) checkMixedChannelRiskWithPreloaded(currentAccountID int64, currentAccountPlatform string, groupIDs []int64, accountsByGroup map[int64][]Account, groupNameByID map[int64]string) error { + currentPlatform := getAccountPlatform(currentAccountPlatform) + if currentPlatform == "" { + return nil + } + + for _, groupID := range groupIDs { + accounts := accountsByGroup[groupID] + for _, account := range accounts { + if currentAccountID > 0 && account.ID == currentAccountID { + continue + } + + otherPlatform := getAccountPlatform(account.Platform) + if otherPlatform == "" { + continue + } + + if currentPlatform != otherPlatform { + groupName := fmt.Sprintf("Group %d", groupID) + if 
name := strings.TrimSpace(groupNameByID[groupID]); name != "" { + groupName = name + } + + return &MixedChannelError{ + GroupID: groupID, + GroupName: groupName, + CurrentPlatform: currentPlatform, + OtherPlatform: otherPlatform, + } + } + } + } + + return nil +} + // CheckMixedChannelRisk checks whether target groups contain mixed channels for the current account platform. func (s *adminServiceImpl) CheckMixedChannelRisk(ctx context.Context, currentAccountID int64, currentAccountPlatform string, groupIDs []int64) error { return s.checkMixedChannelRisk(ctx, currentAccountID, currentAccountPlatform, groupIDs) diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index 5c14e7f9e..fb875f7f3 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -5102,9 +5102,9 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http pendingEventLines := make([]string, 0, 4) - processSSEEvent := func(lines []string) ([]string, string, error) { + processSSEEvent := func(lines []string) ([]string, string, *sseUsagePatch, error) { if len(lines) == 0 { - return nil, "", nil + return nil, "", nil, nil } eventName := "" @@ -5121,11 +5121,11 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } if eventName == "error" { - return nil, dataLine, errors.New("have error in stream") + return nil, dataLine, nil, errors.New("have error in stream") } if dataLine == "" { - return []string{strings.Join(lines, "\n") + "\n\n"}, "", nil + return []string{strings.Join(lines, "\n") + "\n\n"}, "", nil, nil } if dataLine == "[DONE]" { @@ -5134,7 +5134,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http block = "event: " + eventName + "\n" } block += "data: " + dataLine + "\n\n" - return []string{block}, dataLine, nil + return []string{block}, dataLine, nil, nil } var event map[string]any @@ -5145,25 +5145,26 @@ func 
(s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http block = "event: " + eventName + "\n" } block += "data: " + dataLine + "\n\n" - return []string{block}, dataLine, nil + return []string{block}, dataLine, nil, nil } eventType, _ := event["type"].(string) if eventName == "" { eventName = eventType } + eventChanged := false // 兼容 Kimi cached_tokens → cache_read_input_tokens if eventType == "message_start" { if msg, ok := event["message"].(map[string]any); ok { if u, ok := msg["usage"].(map[string]any); ok { - reconcileCachedTokens(u) + eventChanged = reconcileCachedTokens(u) || eventChanged } } } if eventType == "message_delta" { if u, ok := event["usage"].(map[string]any); ok { - reconcileCachedTokens(u) + eventChanged = reconcileCachedTokens(u) || eventChanged } } @@ -5173,13 +5174,13 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http if eventType == "message_start" { if msg, ok := event["message"].(map[string]any); ok { if u, ok := msg["usage"].(map[string]any); ok { - rewriteCacheCreationJSON(u, overrideTarget) + eventChanged = rewriteCacheCreationJSON(u, overrideTarget) || eventChanged } } } if eventType == "message_delta" { if u, ok := event["usage"].(map[string]any); ok { - rewriteCacheCreationJSON(u, overrideTarget) + eventChanged = rewriteCacheCreationJSON(u, overrideTarget) || eventChanged } } } @@ -5188,10 +5189,21 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http if msg, ok := event["message"].(map[string]any); ok { if model, ok := msg["model"].(string); ok && model == mappedModel { msg["model"] = originalModel + eventChanged = true } } } + usagePatch := s.extractSSEUsagePatch(event) + if !eventChanged { + block := "" + if eventName != "" { + block = "event: " + eventName + "\n" + } + block += "data: " + dataLine + "\n\n" + return []string{block}, dataLine, usagePatch, nil + } + newData, err := json.Marshal(event) if err != nil { // 序列化失败,直接透传原始数据 @@ -5200,7 +5212,7 
@@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http block = "event: " + eventName + "\n" } block += "data: " + dataLine + "\n\n" - return []string{block}, dataLine, nil + return []string{block}, dataLine, usagePatch, nil } block := "" @@ -5208,7 +5220,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http block = "event: " + eventName + "\n" } block += "data: " + string(newData) + "\n\n" - return []string{block}, string(newData), nil + return []string{block}, string(newData), usagePatch, nil } for { @@ -5246,7 +5258,7 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http continue } - outputBlocks, data, err := processSSEEvent(pendingEventLines) + outputBlocks, data, usagePatch, err := processSSEEvent(pendingEventLines) pendingEventLines = pendingEventLines[:0] if err != nil { if clientDisconnected { @@ -5269,7 +5281,9 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http ms := int(time.Since(startTime).Milliseconds()) firstTokenMs = &ms } - s.parseSSEUsage(data, usage) + if usagePatch != nil { + mergeSSEUsagePatch(usage, usagePatch) + } } } continue @@ -5300,64 +5314,163 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http } func (s *GatewayService) parseSSEUsage(data string, usage *ClaudeUsage) { - // 解析message_start获取input tokens(标准Claude API格式) - var msgStart struct { - Type string `json:"type"` - Message struct { - Usage ClaudeUsage `json:"usage"` - } `json:"message"` + if usage == nil { + return } - if json.Unmarshal([]byte(data), &msgStart) == nil && msgStart.Type == "message_start" { - usage.InputTokens = msgStart.Message.Usage.InputTokens - usage.CacheCreationInputTokens = msgStart.Message.Usage.CacheCreationInputTokens - usage.CacheReadInputTokens = msgStart.Message.Usage.CacheReadInputTokens - // 解析嵌套的 cache_creation 对象中的 5m/1h 明细 - cc5m := gjson.Get(data, 
"message.usage.cache_creation.ephemeral_5m_input_tokens") - cc1h := gjson.Get(data, "message.usage.cache_creation.ephemeral_1h_input_tokens") - if cc5m.Exists() || cc1h.Exists() { - usage.CacheCreation5mTokens = int(cc5m.Int()) - usage.CacheCreation1hTokens = int(cc1h.Int()) - } + var event map[string]any + if err := json.Unmarshal([]byte(data), &event); err != nil { + return } - // 解析message_delta获取tokens(兼容GLM等把所有usage放在delta中的API) - var msgDelta struct { - Type string `json:"type"` - Usage struct { - InputTokens int `json:"input_tokens"` - OutputTokens int `json:"output_tokens"` - CacheCreationInputTokens int `json:"cache_creation_input_tokens"` - CacheReadInputTokens int `json:"cache_read_input_tokens"` - } `json:"usage"` + if patch := s.extractSSEUsagePatch(event); patch != nil { + mergeSSEUsagePatch(usage, patch) + } +} + +type sseUsagePatch struct { + inputTokens int + hasInputTokens bool + outputTokens int + hasOutputTokens bool + cacheCreationInputTokens int + hasCacheCreationInput bool + cacheReadInputTokens int + hasCacheReadInput bool + cacheCreation5mTokens int + hasCacheCreation5m bool + cacheCreation1hTokens int + hasCacheCreation1h bool +} + +func (s *GatewayService) extractSSEUsagePatch(event map[string]any) *sseUsagePatch { + if len(event) == 0 { + return nil } - if json.Unmarshal([]byte(data), &msgDelta) == nil && msgDelta.Type == "message_delta" { - // message_delta 仅覆盖存在且非0的字段 - // 避免覆盖 message_start 中已有的值(如 input_tokens) - // Claude API 的 message_delta 通常只包含 output_tokens - if msgDelta.Usage.InputTokens > 0 { - usage.InputTokens = msgDelta.Usage.InputTokens + + eventType, _ := event["type"].(string) + switch eventType { + case "message_start": + msg, _ := event["message"].(map[string]any) + usageObj, _ := msg["usage"].(map[string]any) + if len(usageObj) == 0 { + return nil + } + + patch := &sseUsagePatch{} + patch.hasInputTokens = true + if v, ok := parseSSEUsageInt(usageObj["input_tokens"]); ok { + patch.inputTokens = v + } + 
patch.hasCacheCreationInput = true + if v, ok := parseSSEUsageInt(usageObj["cache_creation_input_tokens"]); ok { + patch.cacheCreationInputTokens = v + } + patch.hasCacheReadInput = true + if v, ok := parseSSEUsageInt(usageObj["cache_read_input_tokens"]); ok { + patch.cacheReadInputTokens = v + } + if cc, ok := usageObj["cache_creation"].(map[string]any); ok { + if v, exists := parseSSEUsageInt(cc["ephemeral_5m_input_tokens"]); exists { + patch.cacheCreation5mTokens = v + patch.hasCacheCreation5m = true + } + if v, exists := parseSSEUsageInt(cc["ephemeral_1h_input_tokens"]); exists { + patch.cacheCreation1hTokens = v + patch.hasCacheCreation1h = true + } + } + return patch + + case "message_delta": + usageObj, _ := event["usage"].(map[string]any) + if len(usageObj) == 0 { + return nil + } + + patch := &sseUsagePatch{} + if v, ok := parseSSEUsageInt(usageObj["input_tokens"]); ok && v > 0 { + patch.inputTokens = v + patch.hasInputTokens = true } - if msgDelta.Usage.OutputTokens > 0 { - usage.OutputTokens = msgDelta.Usage.OutputTokens + if v, ok := parseSSEUsageInt(usageObj["output_tokens"]); ok && v > 0 { + patch.outputTokens = v + patch.hasOutputTokens = true } - if msgDelta.Usage.CacheCreationInputTokens > 0 { - usage.CacheCreationInputTokens = msgDelta.Usage.CacheCreationInputTokens + if v, ok := parseSSEUsageInt(usageObj["cache_creation_input_tokens"]); ok && v > 0 { + patch.cacheCreationInputTokens = v + patch.hasCacheCreationInput = true } - if msgDelta.Usage.CacheReadInputTokens > 0 { - usage.CacheReadInputTokens = msgDelta.Usage.CacheReadInputTokens + if v, ok := parseSSEUsageInt(usageObj["cache_read_input_tokens"]); ok && v > 0 { + patch.cacheReadInputTokens = v + patch.hasCacheReadInput = true } + if cc, ok := usageObj["cache_creation"].(map[string]any); ok { + if v, exists := parseSSEUsageInt(cc["ephemeral_5m_input_tokens"]); exists && v > 0 { + patch.cacheCreation5mTokens = v + patch.hasCacheCreation5m = true + } + if v, exists := 
parseSSEUsageInt(cc["ephemeral_1h_input_tokens"]); exists && v > 0 { + patch.cacheCreation1hTokens = v + patch.hasCacheCreation1h = true + } + } + return patch + } + + return nil +} + +func mergeSSEUsagePatch(usage *ClaudeUsage, patch *sseUsagePatch) { + if usage == nil || patch == nil { + return + } + + if patch.hasInputTokens { + usage.InputTokens = patch.inputTokens + } + if patch.hasCacheCreationInput { + usage.CacheCreationInputTokens = patch.cacheCreationInputTokens + } + if patch.hasCacheReadInput { + usage.CacheReadInputTokens = patch.cacheReadInputTokens + } + if patch.hasOutputTokens { + usage.OutputTokens = patch.outputTokens + } + if patch.hasCacheCreation5m { + usage.CacheCreation5mTokens = patch.cacheCreation5mTokens + } + if patch.hasCacheCreation1h { + usage.CacheCreation1hTokens = patch.cacheCreation1hTokens + } +} - // 解析嵌套的 cache_creation 对象中的 5m/1h 明细 - cc5m := gjson.Get(data, "usage.cache_creation.ephemeral_5m_input_tokens") - cc1h := gjson.Get(data, "usage.cache_creation.ephemeral_1h_input_tokens") - if cc5m.Exists() && cc5m.Int() > 0 { - usage.CacheCreation5mTokens = int(cc5m.Int()) +func parseSSEUsageInt(value any) (int, bool) { + switch v := value.(type) { + case float64: + return int(v), true + case float32: + return int(v), true + case int: + return v, true + case int64: + return int(v), true + case int32: + return int(v), true + case json.Number: + if i, err := v.Int64(); err == nil { + return int(i), true } - if cc1h.Exists() && cc1h.Int() > 0 { - usage.CacheCreation1hTokens = int(cc1h.Int()) + if f, err := v.Float64(); err == nil { + return int(f), true + } + case string: + if parsed, err := strconv.Atoi(strings.TrimSpace(v)); err == nil { + return parsed, true } } + return 0, false } // applyCacheTTLOverride 将所有 cache creation tokens 归入指定的 TTL 类型。 @@ -5391,25 +5504,32 @@ func applyCacheTTLOverride(usage *ClaudeUsage, target string) bool { // rewriteCacheCreationJSON 在 JSON usage 对象中重写 cache_creation 嵌套对象的 TTL 分类。 // usageObj 是 usage 
JSON 对象(map[string]any)。 -func rewriteCacheCreationJSON(usageObj map[string]any, target string) { +func rewriteCacheCreationJSON(usageObj map[string]any, target string) bool { ccObj, ok := usageObj["cache_creation"].(map[string]any) if !ok { - return + return false } - v5m, _ := ccObj["ephemeral_5m_input_tokens"].(float64) - v1h, _ := ccObj["ephemeral_1h_input_tokens"].(float64) + v5m, _ := parseSSEUsageInt(ccObj["ephemeral_5m_input_tokens"]) + v1h, _ := parseSSEUsageInt(ccObj["ephemeral_1h_input_tokens"]) total := v5m + v1h if total == 0 { - return + return false } switch target { case "1h": - ccObj["ephemeral_1h_input_tokens"] = total + if v1h == total { + return false + } + ccObj["ephemeral_1h_input_tokens"] = float64(total) ccObj["ephemeral_5m_input_tokens"] = float64(0) default: // "5m" - ccObj["ephemeral_5m_input_tokens"] = total + if v5m == total { + return false + } + ccObj["ephemeral_5m_input_tokens"] = float64(total) ccObj["ephemeral_1h_input_tokens"] = float64(0) } + return true } func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, originalModel, mappedModel string) (*ClaudeUsage, error) { diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index 8670f99aa..b51744511 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -229,6 +229,16 @@ func (s *GeminiMessagesCompatService) isAccountUsableForRequest( account *Account, requestedModel, platform string, useMixedScheduling bool, +) bool { + return s.isAccountUsableForRequestWithPrecheck(ctx, account, requestedModel, platform, useMixedScheduling, nil) +} + +func (s *GeminiMessagesCompatService) isAccountUsableForRequestWithPrecheck( + ctx context.Context, + account *Account, + requestedModel, platform string, + useMixedScheduling bool, + precheckResult map[int64]bool, ) 
bool { // 检查模型调度能力 // Check model scheduling capability @@ -250,7 +260,7 @@ func (s *GeminiMessagesCompatService) isAccountUsableForRequest( // 速率限制预检 // Rate limit precheck - if !s.passesRateLimitPreCheck(ctx, account, requestedModel) { + if !s.passesRateLimitPreCheckWithCache(ctx, account, requestedModel, precheckResult) { return false } @@ -278,9 +288,20 @@ func (s *GeminiMessagesCompatService) isAccountValidForPlatform(account *Account // passesRateLimitPreCheck performs rate limit precheck. // Returns true if passed or precheck not required. func (s *GeminiMessagesCompatService) passesRateLimitPreCheck(ctx context.Context, account *Account, requestedModel string) bool { + return s.passesRateLimitPreCheckWithCache(ctx, account, requestedModel, nil) +} + +func (s *GeminiMessagesCompatService) passesRateLimitPreCheckWithCache(ctx context.Context, account *Account, requestedModel string, precheckResult map[int64]bool) bool { if s.rateLimitService == nil || requestedModel == "" { return true } + + if precheckResult != nil { + if ok, exists := precheckResult[account.ID]; exists { + return ok + } + } + ok, err := s.rateLimitService.PreCheckUsage(ctx, account, requestedModel) if err != nil { logger.LegacyPrintf("service.gemini_messages_compat", "[Gemini PreCheck] Account %d precheck error: %v", account.ID, err) @@ -302,6 +323,7 @@ func (s *GeminiMessagesCompatService) selectBestGeminiAccount( useMixedScheduling bool, ) *Account { var selected *Account + precheckResult := s.buildPreCheckUsageResultMap(ctx, accounts, requestedModel) for i := range accounts { acc := &accounts[i] @@ -312,7 +334,7 @@ func (s *GeminiMessagesCompatService) selectBestGeminiAccount( } // 检查账号是否可用于当前请求 - if !s.isAccountUsableForRequest(ctx, acc, requestedModel, platform, useMixedScheduling) { + if !s.isAccountUsableForRequestWithPrecheck(ctx, acc, requestedModel, platform, useMixedScheduling, precheckResult) { continue } @@ -330,6 +352,23 @@ func (s *GeminiMessagesCompatService) 
selectBestGeminiAccount( return selected } +func (s *GeminiMessagesCompatService) buildPreCheckUsageResultMap(ctx context.Context, accounts []Account, requestedModel string) map[int64]bool { + if s.rateLimitService == nil || requestedModel == "" || len(accounts) == 0 { + return nil + } + + candidates := make([]*Account, 0, len(accounts)) + for i := range accounts { + candidates = append(candidates, &accounts[i]) + } + + result, err := s.rateLimitService.PreCheckUsageBatch(ctx, candidates, requestedModel) + if err != nil { + logger.LegacyPrintf("service.gemini_messages_compat", "[Gemini PreCheckBatch] failed: %v", err) + } + return result +} + // isBetterGeminiAccount 判断 candidate 是否比 current 更优。 // 规则:优先级更高(数值更小)优先;同优先级时,未使用过的优先(OAuth > 非 OAuth),其次是最久未使用的。 // diff --git a/backend/internal/service/ratelimit_service.go b/backend/internal/service/ratelimit_service.go index 812e059a9..0fe991482 100644 --- a/backend/internal/service/ratelimit_service.go +++ b/backend/internal/service/ratelimit_service.go @@ -33,6 +33,10 @@ type geminiUsageCacheEntry struct { totals GeminiUsageTotals } +type geminiUsageTotalsBatchProvider interface { + GetGeminiUsageTotalsBatch(ctx context.Context, accountIDs []int64, startTime, endTime time.Time) (map[int64]GeminiUsageTotals, error) +} + const geminiPrecheckCacheTTL = time.Minute // NewRateLimitService 创建RateLimitService实例 @@ -302,6 +306,218 @@ func (s *RateLimitService) PreCheckUsage(ctx context.Context, account *Account, return true, nil } +// PreCheckUsageBatch performs quota precheck for multiple accounts in one request. +// Returned map value=false means the account should be skipped. 
+func (s *RateLimitService) PreCheckUsageBatch(ctx context.Context, accounts []*Account, requestedModel string) (map[int64]bool, error) { + result := make(map[int64]bool, len(accounts)) + for _, account := range accounts { + if account == nil { + continue + } + result[account.ID] = true + } + + if len(accounts) == 0 || requestedModel == "" { + return result, nil + } + if s.usageRepo == nil || s.geminiQuotaService == nil { + return result, nil + } + + modelClass := geminiModelClassFromName(requestedModel) + now := time.Now() + dailyStart := geminiDailyWindowStart(now) + minuteStart := now.Truncate(time.Minute) + + type quotaAccount struct { + account *Account + quota GeminiQuota + } + quotaAccounts := make([]quotaAccount, 0, len(accounts)) + for _, account := range accounts { + if account == nil || account.Platform != PlatformGemini { + continue + } + quota, ok := s.geminiQuotaService.QuotaForAccount(ctx, account) + if !ok { + continue + } + quotaAccounts = append(quotaAccounts, quotaAccount{ + account: account, + quota: quota, + }) + } + if len(quotaAccounts) == 0 { + return result, nil + } + + // 1) Daily precheck (cached + batch DB fallback) + dailyTotalsByID := make(map[int64]GeminiUsageTotals, len(quotaAccounts)) + dailyMissIDs := make([]int64, 0, len(quotaAccounts)) + for _, item := range quotaAccounts { + limit := geminiDailyLimit(item.quota, modelClass) + if limit <= 0 { + continue + } + accountID := item.account.ID + if totals, ok := s.getGeminiUsageTotals(accountID, dailyStart, now); ok { + dailyTotalsByID[accountID] = totals + continue + } + dailyMissIDs = append(dailyMissIDs, accountID) + } + if len(dailyMissIDs) > 0 { + totalsBatch, err := s.getGeminiUsageTotalsBatch(ctx, dailyMissIDs, dailyStart, now) + if err != nil { + return result, err + } + for _, accountID := range dailyMissIDs { + totals := totalsBatch[accountID] + dailyTotalsByID[accountID] = totals + s.setGeminiUsageTotals(accountID, dailyStart, now, totals) + } + } + for _, item := range 
quotaAccounts { + limit := geminiDailyLimit(item.quota, modelClass) + if limit <= 0 { + continue + } + accountID := item.account.ID + used := geminiUsedRequests(item.quota, modelClass, dailyTotalsByID[accountID], true) + if used >= limit { + resetAt := geminiDailyResetTime(now) + slog.Info("gemini_precheck_daily_quota_reached_batch", "account_id", accountID, "used", used, "limit", limit, "reset_at", resetAt) + result[accountID] = false + } + } + + // 2) Minute precheck (batch DB) + minuteIDs := make([]int64, 0, len(quotaAccounts)) + for _, item := range quotaAccounts { + accountID := item.account.ID + if !result[accountID] { + continue + } + if geminiMinuteLimit(item.quota, modelClass) <= 0 { + continue + } + minuteIDs = append(minuteIDs, accountID) + } + if len(minuteIDs) == 0 { + return result, nil + } + + minuteTotalsByID, err := s.getGeminiUsageTotalsBatch(ctx, minuteIDs, minuteStart, now) + if err != nil { + return result, err + } + for _, item := range quotaAccounts { + accountID := item.account.ID + if !result[accountID] { + continue + } + + limit := geminiMinuteLimit(item.quota, modelClass) + if limit <= 0 { + continue + } + + used := geminiUsedRequests(item.quota, modelClass, minuteTotalsByID[accountID], false) + if used >= limit { + resetAt := minuteStart.Add(time.Minute) + slog.Info("gemini_precheck_minute_quota_reached_batch", "account_id", accountID, "used", used, "limit", limit, "reset_at", resetAt) + result[accountID] = false + } + } + + return result, nil +} + +func (s *RateLimitService) getGeminiUsageTotalsBatch(ctx context.Context, accountIDs []int64, start, end time.Time) (map[int64]GeminiUsageTotals, error) { + result := make(map[int64]GeminiUsageTotals, len(accountIDs)) + if len(accountIDs) == 0 { + return result, nil + } + + ids := make([]int64, 0, len(accountIDs)) + seen := make(map[int64]struct{}, len(accountIDs)) + for _, accountID := range accountIDs { + if accountID <= 0 { + continue + } + if _, ok := seen[accountID]; ok { + continue + } 
+ seen[accountID] = struct{}{} + ids = append(ids, accountID) + } + if len(ids) == 0 { + return result, nil + } + + if batchReader, ok := s.usageRepo.(geminiUsageTotalsBatchProvider); ok { + stats, err := batchReader.GetGeminiUsageTotalsBatch(ctx, ids, start, end) + if err != nil { + return nil, err + } + for _, accountID := range ids { + result[accountID] = stats[accountID] + } + return result, nil + } + + for _, accountID := range ids { + stats, err := s.usageRepo.GetModelStatsWithFilters(ctx, start, end, 0, 0, accountID, 0, nil, nil, nil) + if err != nil { + return nil, err + } + result[accountID] = geminiAggregateUsage(stats) + } + return result, nil +} + +func geminiDailyLimit(quota GeminiQuota, modelClass geminiModelClass) int64 { + if quota.SharedRPD > 0 { + return quota.SharedRPD + } + switch modelClass { + case geminiModelFlash: + return quota.FlashRPD + default: + return quota.ProRPD + } +} + +func geminiMinuteLimit(quota GeminiQuota, modelClass geminiModelClass) int64 { + if quota.SharedRPM > 0 { + return quota.SharedRPM + } + switch modelClass { + case geminiModelFlash: + return quota.FlashRPM + default: + return quota.ProRPM + } +} + +func geminiUsedRequests(quota GeminiQuota, modelClass geminiModelClass, totals GeminiUsageTotals, daily bool) int64 { + if daily { + if quota.SharedRPD > 0 { + return totals.ProRequests + totals.FlashRequests + } + } else { + if quota.SharedRPM > 0 { + return totals.ProRequests + totals.FlashRequests + } + } + switch modelClass { + case geminiModelFlash: + return totals.FlashRequests + default: + return totals.ProRequests + } +} + func (s *RateLimitService) getGeminiUsageTotals(accountID int64, windowStart, now time.Time) (GeminiUsageTotals, bool) { s.usageCacheMu.RLock() defer s.usageCacheMu.RUnlock() diff --git a/backend/internal/service/scheduler_snapshot_service.go b/backend/internal/service/scheduler_snapshot_service.go index 4d95743ce..9f8fa14ac 100644 --- a/backend/internal/service/scheduler_snapshot_service.go +++ 
b/backend/internal/service/scheduler_snapshot_service.go @@ -305,13 +305,78 @@ func (s *SchedulerSnapshotService) handleBulkAccountEvent(ctx context.Context, p if payload == nil { return nil } - ids := parseInt64Slice(payload["account_ids"]) - for _, id := range ids { - if err := s.handleAccountEvent(ctx, &id, payload); err != nil { - return err + if s.accountRepo == nil { + return nil + } + + rawIDs := parseInt64Slice(payload["account_ids"]) + if len(rawIDs) == 0 { + return nil + } + + ids := make([]int64, 0, len(rawIDs)) + seen := make(map[int64]struct{}, len(rawIDs)) + for _, id := range rawIDs { + if id <= 0 { + continue } + if _, exists := seen[id]; exists { + continue + } + seen[id] = struct{}{} + ids = append(ids, id) } - return nil + if len(ids) == 0 { + return nil + } + + preloadGroupIDs := parseInt64Slice(payload["group_ids"]) + accounts, err := s.accountRepo.GetByIDs(ctx, ids) + if err != nil { + return err + } + + found := make(map[int64]struct{}, len(accounts)) + rebuildGroupSet := make(map[int64]struct{}, len(preloadGroupIDs)) + for _, gid := range preloadGroupIDs { + if gid > 0 { + rebuildGroupSet[gid] = struct{}{} + } + } + + for _, account := range accounts { + if account == nil || account.ID <= 0 { + continue + } + found[account.ID] = struct{}{} + if s.cache != nil { + if err := s.cache.SetAccount(ctx, account); err != nil { + return err + } + } + for _, gid := range account.GroupIDs { + if gid > 0 { + rebuildGroupSet[gid] = struct{}{} + } + } + } + + if s.cache != nil { + for _, id := range ids { + if _, ok := found[id]; ok { + continue + } + if err := s.cache.DeleteAccount(ctx, id); err != nil { + return err + } + } + } + + rebuildGroupIDs := make([]int64, 0, len(rebuildGroupSet)) + for gid := range rebuildGroupSet { + rebuildGroupIDs = append(rebuildGroupIDs, gid) + } + return s.rebuildByGroupIDs(ctx, rebuildGroupIDs, "account_bulk_change") } func (s *SchedulerSnapshotService) handleAccountEvent(ctx context.Context, accountID *int64, payload 
map[string]any) error { From 3984b94481cbb6f26e0f7e40de770e4db77fd570 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 19:56:25 +0800 Subject: [PATCH 027/120] =?UTF-8?q?fix(service):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E6=89=B9=E9=87=8F=E8=B4=A6=E5=8F=B7=E6=9B=B4=E6=96=B0=E6=A0=A1?= =?UTF-8?q?=E9=AA=8C=E4=B8=8E=E7=94=A8=E6=88=B7=E5=80=8D=E7=8E=87=E5=8A=A0?= =?UTF-8?q?=E8=BD=BD=E5=9B=9E=E9=80=80?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 为分组存在性校验增加 groupRepo 空指针防护,避免运行时崩溃 - 在批量绑定成功后回写混合渠道预加载快照,防止同批次漏检 - 批量加载用户分组倍率失败时自动回退到逐用户查询 - 补充并更新 unit 回归测试覆盖上述场景 Co-Authored-By: Claude Opus 4.6 --- backend/internal/service/account_service.go | 3 + backend/internal/service/admin_service.go | 61 ++++++++-- .../service/admin_service_bulk_update_test.go | 67 ++++++++++- .../service/admin_service_list_users_test.go | 106 ++++++++++++++++++ 4 files changed, 227 insertions(+), 10 deletions(-) create mode 100644 backend/internal/service/admin_service_list_users_test.go diff --git a/backend/internal/service/account_service.go b/backend/internal/service/account_service.go index be18cdd19..a37071842 100644 --- a/backend/internal/service/account_service.go +++ b/backend/internal/service/account_service.go @@ -302,6 +302,9 @@ func (s *AccountService) validateGroupIDsExist(ctx context.Context, groupIDs []i if len(groupIDs) == 0 { return nil } + if s.groupRepo == nil { + return fmt.Errorf("group repository not configured") + } if batchChecker, ok := s.groupRepo.(groupExistenceBatchChecker); ok { existsByID, err := batchChecker.ExistsByIDs(ctx, groupIDs) diff --git a/backend/internal/service/admin_service.go b/backend/internal/service/admin_service.go index bde64c28a..003924645 100644 --- a/backend/internal/service/admin_service.go +++ b/backend/internal/service/admin_service.go @@ -458,6 +458,7 @@ func (s *adminServiceImpl) ListUsers(ctx context.Context, page, pageSize int, fi ratesByUser, err := batchRepo.GetByUserIDs(ctx, 
userIDs) if err != nil { logger.LegacyPrintf("service.admin", "failed to load user group rates in batch: err=%v", err) + s.loadUserGroupRatesOneByOne(ctx, users) } else { for i := range users { if rates, ok := ratesByUser[users[i].ID]; ok { @@ -466,19 +467,26 @@ func (s *adminServiceImpl) ListUsers(ctx context.Context, page, pageSize int, fi } } } else { - for i := range users { - rates, err := s.userGroupRateRepo.GetByUserID(ctx, users[i].ID) - if err != nil { - logger.LegacyPrintf("service.admin", "failed to load user group rates: user_id=%d err=%v", users[i].ID, err) - continue - } - users[i].GroupRates = rates - } + s.loadUserGroupRatesOneByOne(ctx, users) } } return users, result.Total, nil } +func (s *adminServiceImpl) loadUserGroupRatesOneByOne(ctx context.Context, users []User) { + if s.userGroupRateRepo == nil { + return + } + for i := range users { + rates, err := s.userGroupRateRepo.GetByUserID(ctx, users[i].ID) + if err != nil { + logger.LegacyPrintf("service.admin", "failed to load user group rates: user_id=%d err=%v", users[i].ID, err) + continue + } + users[i].GroupRates = rates + } +} + func (s *adminServiceImpl) GetUser(ctx context.Context, id int64) (*User, error) { user, err := s.userRepo.GetByID(ctx, id) if err != nil { @@ -1446,11 +1454,12 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp // Handle group bindings per account (requires individual operations). 
for _, accountID := range input.AccountIDs { entry := BulkUpdateAccountResult{AccountID: accountID} + platform := "" if input.GroupIDs != nil { // 检查混合渠道风险(除非用户已确认) if !input.SkipMixedChannelCheck { - platform := platformByID[accountID] + platform = platformByID[accountID] if platform == "" { account, err := s.accountRepo.GetByID(ctx, accountID) if err != nil { @@ -1481,6 +1490,9 @@ func (s *adminServiceImpl) BulkUpdateAccounts(ctx context.Context, input *BulkUp result.Results = append(result.Results, entry) continue } + if !input.SkipMixedChannelCheck && platform != "" { + updateMixedChannelPreloadedAccounts(groupAccountsByID, *input.GroupIDs, accountID, platform) + } } entry.Success = true @@ -2191,6 +2203,9 @@ func (s *adminServiceImpl) validateGroupIDsExist(ctx context.Context, groupIDs [ if len(groupIDs) == 0 { return nil } + if s.groupRepo == nil { + return errors.New("group repository not configured") + } if batchReader, ok := s.groupRepo.(groupExistenceBatchReader); ok { existsByID, err := batchReader.ExistsByIDs(ctx, groupIDs) @@ -2250,6 +2265,34 @@ func (s *adminServiceImpl) checkMixedChannelRiskWithPreloaded(currentAccountID i return nil } +func updateMixedChannelPreloadedAccounts(accountsByGroup map[int64][]Account, groupIDs []int64, accountID int64, platform string) { + if len(groupIDs) == 0 || accountID <= 0 || platform == "" { + return + } + for _, groupID := range groupIDs { + if groupID <= 0 { + continue + } + accounts := accountsByGroup[groupID] + found := false + for i := range accounts { + if accounts[i].ID != accountID { + continue + } + accounts[i].Platform = platform + found = true + break + } + if !found { + accounts = append(accounts, Account{ + ID: accountID, + Platform: platform, + }) + } + accountsByGroup[groupID] = accounts + } +} + // CheckMixedChannelRisk checks whether target groups contain mixed channels for the current account platform. 
func (s *adminServiceImpl) CheckMixedChannelRisk(ctx context.Context, currentAccountID int64, currentAccountPlatform string, groupIDs []int64) error { return s.checkMixedChannelRisk(ctx, currentAccountID, currentAccountPlatform, groupIDs) diff --git a/backend/internal/service/admin_service_bulk_update_test.go b/backend/internal/service/admin_service_bulk_update_test.go index 0dccacbb8..647a84a96 100644 --- a/backend/internal/service/admin_service_bulk_update_test.go +++ b/backend/internal/service/admin_service_bulk_update_test.go @@ -15,6 +15,7 @@ type accountRepoStubForBulkUpdate struct { bulkUpdateErr error bulkUpdateIDs []int64 bindGroupErrByID map[int64]error + bindGroupsCalls []int64 getByIDsAccounts []*Account getByIDsErr error getByIDsCalled bool @@ -22,6 +23,8 @@ type accountRepoStubForBulkUpdate struct { getByIDAccounts map[int64]*Account getByIDErrByID map[int64]error getByIDCalled []int64 + listByGroupData map[int64][]Account + listByGroupErr map[int64]error } func (s *accountRepoStubForBulkUpdate) BulkUpdate(_ context.Context, ids []int64, _ AccountBulkUpdate) (int64, error) { @@ -33,6 +36,7 @@ func (s *accountRepoStubForBulkUpdate) BulkUpdate(_ context.Context, ids []int64 } func (s *accountRepoStubForBulkUpdate) BindGroups(_ context.Context, accountID int64, _ []int64) error { + s.bindGroupsCalls = append(s.bindGroupsCalls, accountID) if err, ok := s.bindGroupErrByID[accountID]; ok { return err } @@ -59,6 +63,16 @@ func (s *accountRepoStubForBulkUpdate) GetByID(_ context.Context, id int64) (*Ac return nil, errors.New("account not found") } +func (s *accountRepoStubForBulkUpdate) ListByGroup(_ context.Context, groupID int64) ([]Account, error) { + if err, ok := s.listByGroupErr[groupID]; ok { + return nil, err + } + if rows, ok := s.listByGroupData[groupID]; ok { + return rows, nil + } + return nil, nil +} + // TestAdminService_BulkUpdateAccounts_AllSuccessIDs 验证批量更新成功时返回 success_ids/failed_ids。 func TestAdminService_BulkUpdateAccounts_AllSuccessIDs(t 
*testing.T) { repo := &accountRepoStubForBulkUpdate{} @@ -86,7 +100,10 @@ func TestAdminService_BulkUpdateAccounts_PartialFailureIDs(t *testing.T) { 2: errors.New("bind failed"), }, } - svc := &adminServiceImpl{accountRepo: repo} + svc := &adminServiceImpl{ + accountRepo: repo, + groupRepo: &groupRepoStubForAdmin{getByID: &Group{ID: 10, Name: "g10"}}, + } groupIDs := []int64{10} schedulable := false @@ -105,3 +122,51 @@ func TestAdminService_BulkUpdateAccounts_PartialFailureIDs(t *testing.T) { require.ElementsMatch(t, []int64{2}, result.FailedIDs) require.Len(t, result.Results, 3) } + +func TestAdminService_BulkUpdateAccounts_NilGroupRepoReturnsError(t *testing.T) { + repo := &accountRepoStubForBulkUpdate{} + svc := &adminServiceImpl{accountRepo: repo} + + groupIDs := []int64{10} + input := &BulkUpdateAccountsInput{ + AccountIDs: []int64{1}, + GroupIDs: &groupIDs, + } + + result, err := svc.BulkUpdateAccounts(context.Background(), input) + require.Nil(t, result) + require.Error(t, err) + require.Contains(t, err.Error(), "group repository not configured") +} + +func TestAdminService_BulkUpdateAccounts_MixedChannelCheckUsesUpdatedSnapshot(t *testing.T) { + repo := &accountRepoStubForBulkUpdate{ + getByIDsAccounts: []*Account{ + {ID: 1, Platform: PlatformAnthropic}, + {ID: 2, Platform: PlatformAntigravity}, + }, + listByGroupData: map[int64][]Account{ + 10: {}, + }, + } + svc := &adminServiceImpl{ + accountRepo: repo, + groupRepo: &groupRepoStubForAdmin{getByID: &Group{ID: 10, Name: "目标分组"}}, + } + + groupIDs := []int64{10} + input := &BulkUpdateAccountsInput{ + AccountIDs: []int64{1, 2}, + GroupIDs: &groupIDs, + } + + result, err := svc.BulkUpdateAccounts(context.Background(), input) + require.NoError(t, err) + require.Equal(t, 1, result.Success) + require.Equal(t, 1, result.Failed) + require.ElementsMatch(t, []int64{1}, result.SuccessIDs) + require.ElementsMatch(t, []int64{2}, result.FailedIDs) + require.Len(t, result.Results, 2) + require.Contains(t, 
result.Results[1].Error, "mixed channel") + require.Equal(t, []int64{1}, repo.bindGroupsCalls) +} diff --git a/backend/internal/service/admin_service_list_users_test.go b/backend/internal/service/admin_service_list_users_test.go new file mode 100644 index 000000000..8b50530a0 --- /dev/null +++ b/backend/internal/service/admin_service_list_users_test.go @@ -0,0 +1,106 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/stretchr/testify/require" +) + +type userRepoStubForListUsers struct { + userRepoStub + users []User + err error +} + +func (s *userRepoStubForListUsers) ListWithFilters(_ context.Context, params pagination.PaginationParams, _ UserListFilters) ([]User, *pagination.PaginationResult, error) { + if s.err != nil { + return nil, nil, s.err + } + out := make([]User, len(s.users)) + copy(out, s.users) + return out, &pagination.PaginationResult{ + Total: int64(len(out)), + Page: params.Page, + PageSize: params.PageSize, + }, nil +} + +type userGroupRateRepoStubForListUsers struct { + batchCalls int + singleCall []int64 + + batchErr error + batchData map[int64]map[int64]float64 + + singleErr map[int64]error + singleData map[int64]map[int64]float64 +} + +func (s *userGroupRateRepoStubForListUsers) GetByUserIDs(_ context.Context, _ []int64) (map[int64]map[int64]float64, error) { + s.batchCalls++ + if s.batchErr != nil { + return nil, s.batchErr + } + return s.batchData, nil +} + +func (s *userGroupRateRepoStubForListUsers) GetByUserID(_ context.Context, userID int64) (map[int64]float64, error) { + s.singleCall = append(s.singleCall, userID) + if err, ok := s.singleErr[userID]; ok { + return nil, err + } + if rates, ok := s.singleData[userID]; ok { + return rates, nil + } + return map[int64]float64{}, nil +} + +func (s *userGroupRateRepoStubForListUsers) GetByUserAndGroup(_ context.Context, userID, groupID int64) (*float64, error) { + panic("unexpected 
GetByUserAndGroup call") +} + +func (s *userGroupRateRepoStubForListUsers) SyncUserGroupRates(_ context.Context, userID int64, rates map[int64]*float64) error { + panic("unexpected SyncUserGroupRates call") +} + +func (s *userGroupRateRepoStubForListUsers) DeleteByGroupID(_ context.Context, groupID int64) error { + panic("unexpected DeleteByGroupID call") +} + +func (s *userGroupRateRepoStubForListUsers) DeleteByUserID(_ context.Context, userID int64) error { + panic("unexpected DeleteByUserID call") +} + +func TestAdminService_ListUsers_BatchRateFallbackToSingle(t *testing.T) { + userRepo := &userRepoStubForListUsers{ + users: []User{ + {ID: 101, Username: "u1"}, + {ID: 202, Username: "u2"}, + }, + } + rateRepo := &userGroupRateRepoStubForListUsers{ + batchErr: errors.New("batch unavailable"), + singleData: map[int64]map[int64]float64{ + 101: {11: 1.1}, + 202: {22: 2.2}, + }, + } + svc := &adminServiceImpl{ + userRepo: userRepo, + userGroupRateRepo: rateRepo, + } + + users, total, err := svc.ListUsers(context.Background(), 1, 20, UserListFilters{}) + require.NoError(t, err) + require.Equal(t, int64(2), total) + require.Len(t, users, 2) + require.Equal(t, 1, rateRepo.batchCalls) + require.ElementsMatch(t, []int64{101, 202}, rateRepo.singleCall) + require.Equal(t, 1.1, users[0].GroupRates[11]) + require.Equal(t, 2.2, users[1].GroupRates[22]) +} From 5ff12f479fddeb93b7fd07228e489c82de9643c4 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 19:57:00 +0800 Subject: [PATCH 028/120] =?UTF-8?q?chore(version):=20=E6=9B=B4=E6=96=B0?= =?UTF-8?q?=E7=89=88=E6=9C=AC=E5=8F=B7=E8=87=B3=200.1.85.5?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/cmd/server/VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/backend/cmd/server/VERSION b/backend/cmd/server/VERSION index 44cd42b3b..bf1ab8bcd 100644 --- a/backend/cmd/server/VERSION +++ b/backend/cmd/server/VERSION @@ -1 +1 @@ -0.1.85.4 
+0.1.85.5 From ffc6f6caaab85307ed95179c8247d65ae1623eaf Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 20:16:01 +0800 Subject: [PATCH 029/120] =?UTF-8?q?fix(migrations):=20=E5=85=BC=E5=AE=B905?= =?UTF-8?q?4=E8=BF=81=E7=A7=BBchecksum=E5=8E=86=E5=8F=B2=E5=88=86=E5=8F=89?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit 针对 054_drop_legacy_cache_columns.sql 增加受控兼容规则,允许历史数据库 checksum 与当前文件 checksum 在白名单条件下通过校验。 新增单元测试覆盖兼容命中与未命中场景,避免后续回归。 --- .../internal/repository/migrations_runner.go | 32 ++++++++++++++++ .../migrations_runner_checksum_test.go | 37 +++++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 backend/internal/repository/migrations_runner_checksum_test.go diff --git a/backend/internal/repository/migrations_runner.go b/backend/internal/repository/migrations_runner.go index 5912e50f5..8cfaf4e6e 100644 --- a/backend/internal/repository/migrations_runner.go +++ b/backend/internal/repository/migrations_runner.go @@ -51,6 +51,22 @@ CREATE TABLE IF NOT EXISTS atlas_schema_revisions ( const migrationsAdvisoryLockID int64 = 694208311321144027 const migrationsLockRetryInterval = 500 * time.Millisecond +type migrationChecksumCompatibilityRule struct { + fileChecksum string + acceptedDBChecksum map[string]struct{} +} + +// migrationChecksumCompatibilityRules 仅用于兼容历史上误修改过的迁移文件 checksum。 +// 规则必须同时匹配「迁移名 + 当前文件 checksum + 历史库 checksum」才会放行,避免放宽全局校验。 +var migrationChecksumCompatibilityRules = map[string]migrationChecksumCompatibilityRule{ + "054_drop_legacy_cache_columns.sql": { + fileChecksum: "82de761156e03876653e7a6a4eee883cd927847036f779b0b9f34c42a8af7a7d", + acceptedDBChecksum: map[string]struct{}{ + "182c193f3359946cf094090cd9e57d5c3fd9abaffbc1e8fc378646b8a6fa12b4": {}, + }, + }, +} + // ApplyMigrations 将嵌入的 SQL 迁移文件应用到指定的数据库。 // // 该函数可以在每次应用启动时安全调用: @@ -147,6 +163,10 @@ func applyMigrationsFS(ctx context.Context, db *sql.DB, fsys fs.FS) error { if rowErr == nil { // 迁移已应用,验证校验和是否匹配 if 
existing != checksum { + // 兼容特定历史误改场景(仅白名单规则),其余仍保持严格不可变约束。 + if isMigrationChecksumCompatible(name, existing, checksum) { + continue + } // 校验和不匹配意味着迁移文件在应用后被修改,这是危险的。 // 正确的做法是创建新的迁移文件来进行变更。 return fmt.Errorf( @@ -268,6 +288,18 @@ func latestMigrationBaseline(fsys fs.FS) (string, string, string, error) { return version, version, hash, nil } +func isMigrationChecksumCompatible(name, dbChecksum, fileChecksum string) bool { + rule, ok := migrationChecksumCompatibilityRules[name] + if !ok { + return false + } + if rule.fileChecksum != fileChecksum { + return false + } + _, ok = rule.acceptedDBChecksum[dbChecksum] + return ok +} + // pgAdvisoryLock 获取 PostgreSQL Advisory Lock。 // Advisory Lock 是一种轻量级的锁机制,不与任何特定的数据库对象关联。 // 它非常适合用于应用层面的分布式锁场景,如迁移序列化。 diff --git a/backend/internal/repository/migrations_runner_checksum_test.go b/backend/internal/repository/migrations_runner_checksum_test.go new file mode 100644 index 000000000..26782cd8d --- /dev/null +++ b/backend/internal/repository/migrations_runner_checksum_test.go @@ -0,0 +1,37 @@ +package repository + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestIsMigrationChecksumCompatible(t *testing.T) { + t.Run("054历史checksum可兼容", func(t *testing.T) { + ok := isMigrationChecksumCompatible( + "054_drop_legacy_cache_columns.sql", + "182c193f3359946cf094090cd9e57d5c3fd9abaffbc1e8fc378646b8a6fa12b4", + "82de761156e03876653e7a6a4eee883cd927847036f779b0b9f34c42a8af7a7d", + ) + require.True(t, ok) + }) + + t.Run("054在未知文件checksum下不兼容", func(t *testing.T) { + ok := isMigrationChecksumCompatible( + "054_drop_legacy_cache_columns.sql", + "182c193f3359946cf094090cd9e57d5c3fd9abaffbc1e8fc378646b8a6fa12b4", + "0000000000000000000000000000000000000000000000000000000000000000", + ) + require.False(t, ok) + }) + + t.Run("非白名单迁移不兼容", func(t *testing.T) { + ok := isMigrationChecksumCompatible( + "001_init.sql", + "182c193f3359946cf094090cd9e57d5c3fd9abaffbc1e8fc378646b8a6fa12b4", + 
"82de761156e03876653e7a6a4eee883cd927847036f779b0b9f34c42a8af7a7d", + ) + require.False(t, ok) + }) +} + From af0e3327744c1d4a0c12c3d951b5b3047e4b87bb Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 20:39:21 +0800 Subject: [PATCH 030/120] =?UTF-8?q?fix(openai=5Fws):=20=E5=A2=9E=E5=8A=A0?= =?UTF-8?q?=E7=BB=AD=E9=93=BE=E5=A4=B1=E8=B4=A5=E8=AF=8A=E6=96=AD=E6=97=A5?= =?UTF-8?q?=E5=BF=97?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 在 WSv2 续链请求发送前输出 continuation_probe 日志 - 在 previous_response_not_found 错误分支输出完整上下文诊断 - 在入站 WS 多轮 turn 中增加链路对齐日志便于定位 ID 断链 Co-Authored-By: Claude Opus 4.6 --- .../internal/service/openai_ws_forwarder.go | 123 ++++++++++++++++++ 1 file changed, 123 insertions(+) diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index fc83283a6..b496ac769 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -1433,6 +1433,26 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( lease.QueueWaitDuration().Milliseconds(), previousResponseID != "", ) + if previousResponseID != "" { + logOpenAIWSModeInfo( + "continuation_probe account_id=%d account_type=%s conn_id=%s previous_response_id=%s preferred_conn_id=%s conn_reused=%v store_disabled=%v session_hash=%s header_session_id=%s header_conversation_id=%s session_id_source=%s conversation_id_source=%s has_turn_state=%v turn_state_len=%d has_prompt_cache_key=%v", + account.ID, + account.Type, + truncateOpenAIWSLogValue(connID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(previousResponseID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(preferredConnID, openAIWSIDValueMaxLen), + lease.Reused(), + storeDisabled, + truncateOpenAIWSLogValue(sessionHash, 12), + openAIWSHeaderValueForLog(wsHeaders, "session_id"), + openAIWSHeaderValueForLog(wsHeaders, "conversation_id"), + 
normalizeOpenAIWSLogValue(sessionResolution.SessionSource), + normalizeOpenAIWSLogValue(sessionResolution.ConversationSource), + turnState != "", + len(turnState), + promptCacheKey != "", + ) + } if c != nil { SetOpsLatencyMs(c, OpsOpenAIWSConnPickMsKey, lease.ConnPickDuration().Milliseconds()) SetOpsLatencyMs(c, OpsOpenAIWSQueueWaitMsKey, lease.QueueWaitDuration().Milliseconds()) @@ -1697,6 +1717,31 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( errType, errMessage, ) + if fallbackReason == "previous_response_not_found" { + logOpenAIWSModeInfo( + "previous_response_not_found_diag account_id=%d account_type=%s conn_id=%s previous_response_id=%s response_id=%s event_idx=%d req_stream=%v store_disabled=%v conn_reused=%v session_hash=%s header_session_id=%s header_conversation_id=%s session_id_source=%s conversation_id_source=%s has_turn_state=%v turn_state_len=%d has_prompt_cache_key=%v err_code=%s err_type=%s err_message=%s", + account.ID, + account.Type, + connID, + truncateOpenAIWSLogValue(previousResponseID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(responseID, openAIWSIDValueMaxLen), + eventCount, + reqStream, + storeDisabled, + lease.Reused(), + truncateOpenAIWSLogValue(sessionHash, 12), + openAIWSHeaderValueForLog(wsHeaders, "session_id"), + openAIWSHeaderValueForLog(wsHeaders, "conversation_id"), + normalizeOpenAIWSLogValue(sessionResolution.SessionSource), + normalizeOpenAIWSLogValue(sessionResolution.ConversationSource), + turnState != "", + len(turnState), + promptCacheKey != "", + errCode, + errType, + errMessage, + ) + } // error 事件后连接不再可复用,避免回池后污染下一请求。 lease.MarkBroken() if !wroteDownstream && canFallback { @@ -2016,6 +2061,22 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( storeDisabled, ) } + if firstPayload.previousResponseID != "" { + logOpenAIWSModeInfo( + "ingress_ws_continuation_probe account_id=%d turn=%d previous_response_id=%s preferred_conn_id=%s session_hash=%s header_session_id=%s 
header_conversation_id=%s has_turn_state=%v turn_state_len=%d has_prompt_cache_key=%v store_disabled=%v", + account.ID, + 1, + truncateOpenAIWSLogValue(firstPayload.previousResponseID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(preferredConnID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(sessionHash, 12), + openAIWSHeaderValueForLog(baseAcquireReq.Headers, "session_id"), + openAIWSHeaderValueForLog(baseAcquireReq.Headers, "conversation_id"), + turnState != "", + len(turnState), + firstPayload.promptCacheKey != "", + storeDisabled, + ) + } acquireTimeout := s.openAIWSAcquireTimeout() if acquireTimeout <= 0 { @@ -2132,6 +2193,9 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( usage := OpenAIUsage{} var firstTokenMs *int reqStream := true + turnPreviousResponseID := openAIWSPayloadString(payload, "previous_response_id") + turnPromptCacheKey := openAIWSPayloadString(payload, "prompt_cache_key") + turnStoreDisabled := s.isOpenAIWSStoreDisabledInRequest(payload, account) eventCount := 0 tokenEventCount := 0 terminalEventCount := 0 @@ -2176,6 +2240,26 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } lastEventType = eventType } + if eventType == "error" { + errCodeRaw, errTypeRaw, errMsgRaw := parseOpenAIWSErrorEventFields(upstreamMessage) + fallbackReason, _ := classifyOpenAIWSErrorEventFromRaw(errCodeRaw, errTypeRaw, errMsgRaw) + errCode, errType, errMessage := summarizeOpenAIWSErrorEventFieldsFromRaw(errCodeRaw, errTypeRaw, errMsgRaw) + logOpenAIWSModeInfo( + "ingress_ws_error_event account_id=%d turn=%d conn_id=%s idx=%d fallback_reason=%s err_code=%s err_type=%s err_message=%s previous_response_id=%s response_id=%s store_disabled=%v has_prompt_cache_key=%v", + account.ID, + turn, + truncateOpenAIWSLogValue(lease.ConnID(), openAIWSIDValueMaxLen), + eventCount, + truncateOpenAIWSLogValue(fallbackReason, openAIWSLogValueMaxLen), + errCode, + errType, + errMessage, + truncateOpenAIWSLogValue(turnPreviousResponseID, 
openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(responseID, openAIWSIDValueMaxLen), + turnStoreDisabled, + turnPromptCacheKey != "", + ) + } isTokenEvent := isOpenAIWSTokenEvent(eventType) if isTokenEvent { tokenEventCount++ @@ -2287,6 +2371,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( turn := 1 turnRetry := 0 lastTurnFinishedAt := time.Time{} + lastTurnResponseID := "" skipBeforeTurn := false for { if !skipBeforeTurn && hooks != nil && hooks.BeforeTurn != nil { @@ -2333,6 +2418,27 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } } connID := sessionConnID + currentPreviousResponseID := openAIWSPayloadString(currentPayload, "previous_response_id") + if currentPreviousResponseID != "" { + expectedPrev := strings.TrimSpace(lastTurnResponseID) + chainedFromLast := expectedPrev != "" && currentPreviousResponseID == expectedPrev + logOpenAIWSModeInfo( + "ingress_ws_turn_chain account_id=%d turn=%d conn_id=%s previous_response_id=%s last_turn_response_id=%s chained_from_last=%v preferred_conn_id=%s header_session_id=%s header_conversation_id=%s has_turn_state=%v turn_state_len=%d has_prompt_cache_key=%v store_disabled=%v", + account.ID, + turn, + truncateOpenAIWSLogValue(connID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(currentPreviousResponseID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(expectedPrev, openAIWSIDValueMaxLen), + chainedFromLast, + truncateOpenAIWSLogValue(preferredConnID, openAIWSIDValueMaxLen), + openAIWSHeaderValueForLog(baseAcquireReq.Headers, "session_id"), + openAIWSHeaderValueForLog(baseAcquireReq.Headers, "conversation_id"), + turnState != "", + len(turnState), + openAIWSPayloadString(currentPayload, "prompt_cache_key") != "", + storeDisabled, + ) + } result, relayErr := sendAndRelay(turn, sessionLease, currentPayload, currentPayloadBytes, currentOriginalModel) if relayErr != nil { @@ -2373,6 +2479,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return 
errors.New("websocket turn result is nil") } responseID := strings.TrimSpace(result.RequestID) + lastTurnResponseID = responseID if responseID != "" && stateStore != nil { ttl := s.openAIWSResponseStickyTTL() @@ -2412,6 +2519,22 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( updatedHeaders, _ := s.buildOpenAIWSHeaders(c, account, token, wsDecision, isCodexCLI, turnState, strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)), nextPayload.promptCacheKey) baseAcquireReq.Headers = updatedHeaders } + if nextPayload.previousResponseID != "" { + expectedPrev := strings.TrimSpace(lastTurnResponseID) + chainedFromLast := expectedPrev != "" && nextPayload.previousResponseID == expectedPrev + logOpenAIWSModeInfo( + "ingress_ws_next_turn_chain account_id=%d turn=%d next_turn=%d conn_id=%s previous_response_id=%s last_turn_response_id=%s chained_from_last=%v has_prompt_cache_key=%v store_disabled=%v", + account.ID, + turn, + turn+1, + truncateOpenAIWSLogValue(connID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(nextPayload.previousResponseID, openAIWSIDValueMaxLen), + truncateOpenAIWSLogValue(expectedPrev, openAIWSIDValueMaxLen), + chainedFromLast, + nextPayload.promptCacheKey != "", + storeDisabled, + ) + } if stateStore != nil && nextPayload.previousResponseID != "" { if stickyConnID, ok := stateStore.GetResponseConn(nextPayload.previousResponseID); ok { if sessionConnID != "" && stickyConnID != "" && stickyConnID != sessionConnID { From 5b96048f4f1c222d6ec56660cd74aa4525f821fc Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Thu, 26 Feb 2026 20:41:08 +0800 Subject: [PATCH 031/120] =?UTF-8?q?fix(backend):=20=E4=BF=AE=E5=A4=8D?= =?UTF-8?q?=E6=9F=A5=E8=AF=A2=E8=B6=85=E6=97=B6=E5=88=A4=E5=AE=9A=E4=B8=8E?= =?UTF-8?q?=E6=A8=A1=E5=9E=8B=E6=98=A0=E5=B0=84=E7=BC=93=E5=AD=98=E5=A4=B1?= =?UTF-8?q?=E6=95=88?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - 将查询超时判定限定为 context.DeadlineExceeded - 新增超时判定测试覆盖 wrapped 
error 与 canceled 场景 - 为模型映射缓存增加内容签名避免原地改值导致缓存未失效 - 补充模型映射缓存原地修改失效测试 Co-Authored-By: Claude Opus 4.6 --- .../internal/repository/ops_repo_dashboard.go | 2 +- .../ops_repo_dashboard_timeout_test.go | 22 +++++++++++ backend/internal/service/account.go | 38 ++++++++++++++++++- .../internal/service/account_wildcard_test.go | 22 +++++++++++ 4 files changed, 82 insertions(+), 2 deletions(-) create mode 100644 backend/internal/repository/ops_repo_dashboard_timeout_test.go diff --git a/backend/internal/repository/ops_repo_dashboard.go b/backend/internal/repository/ops_repo_dashboard.go index 4246bcd82..fb70a439b 100644 --- a/backend/internal/repository/ops_repo_dashboard.go +++ b/backend/internal/repository/ops_repo_dashboard.go @@ -962,7 +962,7 @@ FROM combined` } func isQueryTimeoutErr(err error) bool { - return errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) + return errors.Is(err, context.DeadlineExceeded) } func buildUsageWhere(filter *service.OpsDashboardFilter, start, end time.Time, startIndex int) (join string, where string, args []any, nextIndex int) { diff --git a/backend/internal/repository/ops_repo_dashboard_timeout_test.go b/backend/internal/repository/ops_repo_dashboard_timeout_test.go new file mode 100644 index 000000000..76332ca0a --- /dev/null +++ b/backend/internal/repository/ops_repo_dashboard_timeout_test.go @@ -0,0 +1,22 @@ +package repository + +import ( + "context" + "fmt" + "testing" +) + +func TestIsQueryTimeoutErr(t *testing.T) { + if !isQueryTimeoutErr(context.DeadlineExceeded) { + t.Fatalf("context.DeadlineExceeded should be treated as query timeout") + } + if !isQueryTimeoutErr(fmt.Errorf("wrapped: %w", context.DeadlineExceeded)) { + t.Fatalf("wrapped context.DeadlineExceeded should be treated as query timeout") + } + if isQueryTimeoutErr(context.Canceled) { + t.Fatalf("context.Canceled should not be treated as query timeout") + } + if isQueryTimeoutErr(fmt.Errorf("wrapped: %w", context.Canceled)) { + 
t.Fatalf("wrapped context.Canceled should not be treated as query timeout") + } +} diff --git a/backend/internal/service/account.go b/backend/internal/service/account.go index 2ad2c5dbc..73834eee0 100644 --- a/backend/internal/service/account.go +++ b/backend/internal/service/account.go @@ -3,6 +3,7 @@ package service import ( "encoding/json" + "hash/fnv" "reflect" "sort" "strconv" @@ -58,6 +59,7 @@ type Account struct { modelMappingCacheCredentialsPtr uintptr modelMappingCacheRawPtr uintptr modelMappingCacheRawLen int + modelMappingCacheRawSig uint64 } type TempUnschedulableRule struct { @@ -361,21 +363,31 @@ func (a *Account) GetModelMapping() map[string]string { rawMapping, _ := a.Credentials["model_mapping"].(map[string]any) rawPtr := mapPtr(rawMapping) rawLen := len(rawMapping) + rawSig := uint64(0) + rawSigReady := false if a.modelMappingCacheReady && a.modelMappingCacheCredentialsPtr == credentialsPtr && a.modelMappingCacheRawPtr == rawPtr && a.modelMappingCacheRawLen == rawLen { - return a.modelMappingCache + rawSig = modelMappingSignature(rawMapping) + rawSigReady = true + if a.modelMappingCacheRawSig == rawSig { + return a.modelMappingCache + } } mapping := a.resolveModelMapping(rawMapping) + if !rawSigReady { + rawSig = modelMappingSignature(rawMapping) + } a.modelMappingCache = mapping a.modelMappingCacheReady = true a.modelMappingCacheCredentialsPtr = credentialsPtr a.modelMappingCacheRawPtr = rawPtr a.modelMappingCacheRawLen = rawLen + a.modelMappingCacheRawSig = rawSig return mapping } @@ -426,6 +438,30 @@ func mapPtr(m map[string]any) uintptr { return reflect.ValueOf(m).Pointer() } +func modelMappingSignature(rawMapping map[string]any) uint64 { + if len(rawMapping) == 0 { + return 0 + } + keys := make([]string, 0, len(rawMapping)) + for k := range rawMapping { + keys = append(keys, k) + } + sort.Strings(keys) + + h := fnv.New64a() + for _, k := range keys { + _, _ = h.Write([]byte(k)) + _, _ = h.Write([]byte{0}) + if v, ok := rawMapping[k].(string); 
ok { + _, _ = h.Write([]byte(v)) + } else { + _, _ = h.Write([]byte{1}) + } + _, _ = h.Write([]byte{0xff}) + } + return h.Sum64() +} + func ensureAntigravityDefaultPassthrough(mapping map[string]string, model string) { if mapping == nil || model == "" { return diff --git a/backend/internal/service/account_wildcard_test.go b/backend/internal/service/account_wildcard_test.go index 86239e6e4..7782f948b 100644 --- a/backend/internal/service/account_wildcard_test.go +++ b/backend/internal/service/account_wildcard_test.go @@ -361,3 +361,25 @@ func TestAccountGetModelMapping_CacheInvalidatesOnMappingLenChange(t *testing.T) t.Fatalf("expected cache invalidated after mapping len change, got: %v", second) } } + +func TestAccountGetModelMapping_CacheInvalidatesOnInPlaceValueChange(t *testing.T) { + rawMapping := map[string]any{ + "claude-sonnet": "sonnet-a", + } + account := &Account{ + Credentials: map[string]any{ + "model_mapping": rawMapping, + }, + } + + first := account.GetModelMapping() + if first["claude-sonnet"] != "sonnet-a" { + t.Fatalf("unexpected first mapping: %v", first) + } + + rawMapping["claude-sonnet"] = "sonnet-b" + second := account.GetModelMapping() + if second["claude-sonnet"] != "sonnet-b" { + t.Fatalf("expected cache invalidated after in-place value change, got: %v", second) + } +} From a5d92d6850a2d922735bc35e2eafeba58b906727 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Fri, 27 Feb 2026 08:33:29 +0800 Subject: [PATCH 032/120] =?UTF-8?q?test(backend):=20=E8=A1=A5=E5=85=85?= =?UTF-8?q?=E8=BF=9E=E6=8E=A5=E6=B1=A0=E4=B8=8E=E8=BF=81=E7=A7=BB=E5=99=A8?= =?UTF-8?q?=E5=8D=95=E5=85=83=E6=B5=8B=E8=AF=95=E5=B9=B6=E6=8F=90=E5=8D=87?= =?UTF-8?q?=E8=A6=86=E7=9B=96=E7=8E=87?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/cmd/server/wire_gen_test.go | 81 +++ .../migrations_runner_extra_test.go | 368 ++++++++++++ .../internal/service/openai_ws_pool_test.go | 542 ++++++++++++++++++ 3 files changed, 991 
insertions(+) create mode 100644 backend/cmd/server/wire_gen_test.go create mode 100644 backend/internal/repository/migrations_runner_extra_test.go diff --git a/backend/cmd/server/wire_gen_test.go b/backend/cmd/server/wire_gen_test.go new file mode 100644 index 000000000..d8881c89d --- /dev/null +++ b/backend/cmd/server/wire_gen_test.go @@ -0,0 +1,81 @@ +package main + +import ( + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/handler" + "github.com/Wei-Shaw/sub2api/internal/service" + "github.com/stretchr/testify/require" +) + +func TestProvideServiceBuildInfo(t *testing.T) { + in := handler.BuildInfo{ + Version: "v-test", + BuildType: "release", + } + out := provideServiceBuildInfo(in) + require.Equal(t, in.Version, out.Version) + require.Equal(t, in.BuildType, out.BuildType) +} + +func TestProvideCleanup_WithMinimalDependencies_NoPanic(t *testing.T) { + cfg := &config.Config{} + + oauthSvc := service.NewOAuthService(nil, nil) + openAIOAuthSvc := service.NewOpenAIOAuthService(nil, nil) + geminiOAuthSvc := service.NewGeminiOAuthService(nil, nil, nil, cfg) + antigravityOAuthSvc := service.NewAntigravityOAuthService(nil) + + tokenRefreshSvc := service.NewTokenRefreshService( + nil, + oauthSvc, + openAIOAuthSvc, + geminiOAuthSvc, + antigravityOAuthSvc, + nil, + nil, + cfg, + ) + accountExpirySvc := service.NewAccountExpiryService(nil, time.Second) + subscriptionExpirySvc := service.NewSubscriptionExpiryService(nil, time.Second) + pricingSvc := service.NewPricingService(cfg, nil) + emailQueueSvc := service.NewEmailQueueService(nil, 1) + billingCacheSvc := service.NewBillingCacheService(nil, nil, nil, cfg) + idempotencyCleanupSvc := service.NewIdempotencyCleanupService(nil, cfg) + schedulerSnapshotSvc := service.NewSchedulerSnapshotService(nil, nil, nil, nil, cfg) + opsSystemLogSinkSvc := service.NewOpsSystemLogSink(nil) + + cleanup := provideCleanup( + nil, // entClient + nil, // redis + 
&service.OpsMetricsCollector{}, + &service.OpsAggregationService{}, + &service.OpsAlertEvaluatorService{}, + &service.OpsCleanupService{}, + &service.OpsScheduledReportService{}, + opsSystemLogSinkSvc, + &service.SoraMediaCleanupService{}, + schedulerSnapshotSvc, + tokenRefreshSvc, + accountExpirySvc, + subscriptionExpirySvc, + &service.UsageCleanupService{}, + idempotencyCleanupSvc, + pricingSvc, + emailQueueSvc, + billingCacheSvc, + &service.UsageRecordWorkerPool{}, + &service.SubscriptionService{}, + oauthSvc, + openAIOAuthSvc, + geminiOAuthSvc, + antigravityOAuthSvc, + nil, // openAIGateway + ) + + require.NotPanics(t, func() { + cleanup() + }) +} diff --git a/backend/internal/repository/migrations_runner_extra_test.go b/backend/internal/repository/migrations_runner_extra_test.go new file mode 100644 index 000000000..cbd68b427 --- /dev/null +++ b/backend/internal/repository/migrations_runner_extra_test.go @@ -0,0 +1,368 @@ +package repository + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "errors" + "io/fs" + "strings" + "testing" + "testing/fstest" + "time" + + sqlmock "github.com/DATA-DOG/go-sqlmock" + "github.com/stretchr/testify/require" +) + +func TestApplyMigrations_NilDB(t *testing.T) { + err := ApplyMigrations(context.Background(), nil) + require.Error(t, err) + require.Contains(t, err.Error(), "nil sql db") +} + +func TestApplyMigrations_DelegatesToApplyMigrationsFS(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + mock.ExpectQuery("SELECT pg_try_advisory_lock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). 
+ WillReturnError(errors.New("lock failed")) + + err = ApplyMigrations(context.Background(), db) + require.Error(t, err) + require.Contains(t, err.Error(), "acquire migrations lock") + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestLatestMigrationBaseline(t *testing.T) { + t.Run("empty_fs_returns_baseline", func(t *testing.T) { + version, description, hash, err := latestMigrationBaseline(fstest.MapFS{}) + require.NoError(t, err) + require.Equal(t, "baseline", version) + require.Equal(t, "baseline", description) + require.Equal(t, "", hash) + }) + + t.Run("uses_latest_sorted_sql_file", func(t *testing.T) { + fsys := fstest.MapFS{ + "001_init.sql": &fstest.MapFile{Data: []byte("CREATE TABLE t1(id int);")}, + "010_final.sql": &fstest.MapFile{ + Data: []byte("CREATE TABLE t2(id int);"), + }, + } + version, description, hash, err := latestMigrationBaseline(fsys) + require.NoError(t, err) + require.Equal(t, "010_final", version) + require.Equal(t, "010_final", description) + require.Len(t, hash, 64) + }) + + t.Run("read_file_error", func(t *testing.T) { + fsys := fstest.MapFS{ + "010_bad.sql": &fstest.MapFile{Mode: fs.ModeDir}, + } + _, _, _, err := latestMigrationBaseline(fsys) + require.Error(t, err) + }) +} + +func TestIsMigrationChecksumCompatible_AdditionalCases(t *testing.T) { + require.False(t, isMigrationChecksumCompatible("unknown.sql", "db", "file")) + + var ( + name string + rule migrationChecksumCompatibilityRule + ) + for n, r := range migrationChecksumCompatibilityRules { + name = n + rule = r + break + } + require.NotEmpty(t, name) + + require.False(t, isMigrationChecksumCompatible(name, "db-not-accepted", "file-not-match")) + require.False(t, isMigrationChecksumCompatible(name, "db-not-accepted", rule.fileChecksum)) + + var accepted string + for checksum := range rule.acceptedDBChecksum { + accepted = checksum + break + } + require.NotEmpty(t, accepted) + require.True(t, isMigrationChecksumCompatible(name, accepted, rule.fileChecksum)) +} + 
+func TestEnsureAtlasBaselineAligned(t *testing.T) { + t.Run("skip_when_no_legacy_table", func(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("schema_migrations"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(false)) + + err = ensureAtlasBaselineAligned(context.Background(), db, fstest.MapFS{}) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) + }) + + t.Run("create_atlas_and_insert_baseline_when_empty", func(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("schema_migrations"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(true)) + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("atlas_schema_revisions"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(false)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS atlas_schema_revisions"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM atlas_schema_revisions"). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(0)) + mock.ExpectExec("INSERT INTO atlas_schema_revisions"). + WithArgs("002_next", "002_next", 1, sqlmock.AnyArg()). + WillReturnResult(sqlmock.NewResult(1, 1)) + + fsys := fstest.MapFS{ + "001_init.sql": &fstest.MapFile{Data: []byte("CREATE TABLE t1(id int);")}, + "002_next.sql": &fstest.MapFile{Data: []byte("CREATE TABLE t2(id int);")}, + } + err = ensureAtlasBaselineAligned(context.Background(), db, fsys) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) + }) + + t.Run("error_when_checking_legacy_table", func(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("schema_migrations"). 
+ WillReturnError(errors.New("exists failed")) + + err = ensureAtlasBaselineAligned(context.Background(), db, fstest.MapFS{}) + require.Error(t, err) + require.Contains(t, err.Error(), "check schema_migrations") + require.NoError(t, mock.ExpectationsWereMet()) + }) + + t.Run("error_when_counting_atlas_rows", func(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("schema_migrations"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(true)) + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("atlas_schema_revisions"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(true)) + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM atlas_schema_revisions"). + WillReturnError(errors.New("count failed")) + + err = ensureAtlasBaselineAligned(context.Background(), db, fstest.MapFS{}) + require.Error(t, err) + require.Contains(t, err.Error(), "count atlas_schema_revisions") + require.NoError(t, mock.ExpectationsWereMet()) + }) + + t.Run("error_when_creating_atlas_table", func(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("schema_migrations"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(true)) + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("atlas_schema_revisions"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(false)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS atlas_schema_revisions"). 
+ WillReturnError(errors.New("create failed")) + + err = ensureAtlasBaselineAligned(context.Background(), db, fstest.MapFS{}) + require.Error(t, err) + require.Contains(t, err.Error(), "create atlas_schema_revisions") + require.NoError(t, mock.ExpectationsWereMet()) + }) + + t.Run("error_when_inserting_baseline", func(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("schema_migrations"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(true)) + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("atlas_schema_revisions"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(true)) + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM atlas_schema_revisions"). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(0)) + mock.ExpectExec("INSERT INTO atlas_schema_revisions"). + WithArgs("001_init", "001_init", 1, sqlmock.AnyArg()). + WillReturnError(errors.New("insert failed")) + + fsys := fstest.MapFS{ + "001_init.sql": &fstest.MapFile{Data: []byte("CREATE TABLE t(id int);")}, + } + err = ensureAtlasBaselineAligned(context.Background(), db, fsys) + require.Error(t, err) + require.Contains(t, err.Error(), "insert atlas baseline") + require.NoError(t, mock.ExpectationsWereMet()) + }) +} + +func TestApplyMigrationsFS_ChecksumMismatchRejected(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + prepareMigrationsBootstrapExpectations(mock) + mock.ExpectQuery("SELECT checksum FROM schema_migrations WHERE filename = \\$1"). + WithArgs("001_init.sql"). + WillReturnRows(sqlmock.NewRows([]string{"checksum"}).AddRow("mismatched-checksum")) + mock.ExpectExec("SELECT pg_advisory_unlock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). 
+ WillReturnResult(sqlmock.NewResult(0, 1)) + + fsys := fstest.MapFS{ + "001_init.sql": &fstest.MapFile{Data: []byte("CREATE TABLE t(id int);")}, + } + err = applyMigrationsFS(context.Background(), db, fsys) + require.Error(t, err) + require.Contains(t, err.Error(), "checksum mismatch") + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestApplyMigrationsFS_CheckMigrationQueryError(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + prepareMigrationsBootstrapExpectations(mock) + mock.ExpectQuery("SELECT checksum FROM schema_migrations WHERE filename = \\$1"). + WithArgs("001_err.sql"). + WillReturnError(errors.New("query failed")) + mock.ExpectExec("SELECT pg_advisory_unlock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). + WillReturnResult(sqlmock.NewResult(0, 1)) + + fsys := fstest.MapFS{ + "001_err.sql": &fstest.MapFile{Data: []byte("SELECT 1;")}, + } + err = applyMigrationsFS(context.Background(), db, fsys) + require.Error(t, err) + require.Contains(t, err.Error(), "check migration 001_err.sql") + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestApplyMigrationsFS_SkipEmptyAndAlreadyApplied(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + prepareMigrationsBootstrapExpectations(mock) + + alreadySQL := "CREATE TABLE t(id int);" + checksum := migrationChecksum(alreadySQL) + mock.ExpectQuery("SELECT checksum FROM schema_migrations WHERE filename = \\$1"). + WithArgs("001_already.sql"). + WillReturnRows(sqlmock.NewRows([]string{"checksum"}).AddRow(checksum)) + mock.ExpectExec("SELECT pg_advisory_unlock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). 
+ WillReturnResult(sqlmock.NewResult(0, 1)) + + fsys := fstest.MapFS{ + "000_empty.sql": &fstest.MapFile{Data: []byte(" \n\t ")}, + "001_already.sql": &fstest.MapFile{Data: []byte(alreadySQL)}, + } + err = applyMigrationsFS(context.Background(), db, fsys) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestApplyMigrationsFS_ReadMigrationError(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + prepareMigrationsBootstrapExpectations(mock) + mock.ExpectExec("SELECT pg_advisory_unlock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). + WillReturnResult(sqlmock.NewResult(0, 1)) + + fsys := fstest.MapFS{ + "001_bad.sql": &fstest.MapFile{Mode: fs.ModeDir}, + } + err = applyMigrationsFS(context.Background(), db, fsys) + require.Error(t, err) + require.Contains(t, err.Error(), "read migration 001_bad.sql") + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestPgAdvisoryLockAndUnlock_ErrorBranches(t *testing.T) { + t.Run("context_cancelled_while_not_locked", func(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + mock.ExpectQuery("SELECT pg_try_advisory_lock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). + WillReturnRows(sqlmock.NewRows([]string{"pg_try_advisory_lock"}).AddRow(false)) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Millisecond) + defer cancel() + err = pgAdvisoryLock(ctx, db) + require.Error(t, err) + require.Contains(t, err.Error(), "acquire migrations lock") + require.NoError(t, mock.ExpectationsWereMet()) + }) + + t.Run("unlock_exec_error", func(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + mock.ExpectExec("SELECT pg_advisory_unlock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). 
+ WillReturnError(errors.New("unlock failed")) + + err = pgAdvisoryUnlock(context.Background(), db) + require.Error(t, err) + require.Contains(t, err.Error(), "release migrations lock") + require.NoError(t, mock.ExpectationsWereMet()) + }) + + t.Run("acquire_lock_after_retry", func(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + mock.ExpectQuery("SELECT pg_try_advisory_lock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). + WillReturnRows(sqlmock.NewRows([]string{"pg_try_advisory_lock"}).AddRow(false)) + mock.ExpectQuery("SELECT pg_try_advisory_lock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). + WillReturnRows(sqlmock.NewRows([]string{"pg_try_advisory_lock"}).AddRow(true)) + + ctx, cancel := context.WithTimeout(context.Background(), migrationsLockRetryInterval*3) + defer cancel() + start := time.Now() + err = pgAdvisoryLock(ctx, db) + require.NoError(t, err) + require.GreaterOrEqual(t, time.Since(start), migrationsLockRetryInterval) + require.NoError(t, mock.ExpectationsWereMet()) + }) +} + +func migrationChecksum(content string) string { + sum := sha256.Sum256([]byte(strings.TrimSpace(content))) + return hex.EncodeToString(sum[:]) +} diff --git a/backend/internal/service/openai_ws_pool_test.go b/backend/internal/service/openai_ws_pool_test.go index f0ab646cc..662d1b5da 100644 --- a/backend/internal/service/openai_ws_pool_test.go +++ b/backend/internal/service/openai_ws_pool_test.go @@ -6,6 +6,7 @@ import ( "net/http" "strings" "sync" + "sync/atomic" "testing" "time" @@ -635,6 +636,507 @@ func TestOpenAIWSConnPool_Close(t *testing.T) { nilPool.Close() } +func TestOpenAIWSDialError_ErrorAndUnwrap(t *testing.T) { + baseErr := errors.New("boom") + dialErr := &openAIWSDialError{StatusCode: 502, Err: baseErr} + require.Contains(t, dialErr.Error(), "status=502") + require.ErrorIs(t, dialErr.Unwrap(), baseErr) + + noStatus := &openAIWSDialError{Err: baseErr} + require.Contains(t, noStatus.Error(), "boom") + + var 
nilDialErr *openAIWSDialError + require.Equal(t, "", nilDialErr.Error()) + require.NoError(t, nilDialErr.Unwrap()) +} + +func TestOpenAIWSConnLease_ReadWriteHelpersAndConnStats(t *testing.T) { + conn := newOpenAIWSConn("helper_conn", 1, &openAIWSFakeConn{}, http.Header{ + "X-Test": []string{" value "}, + }) + lease := &openAIWSConnLease{conn: conn} + + require.NoError(t, lease.WriteJSONContext(context.Background(), map[string]any{"type": "response.create"})) + payload, err := lease.ReadMessage(100 * time.Millisecond) + require.NoError(t, err) + require.Contains(t, string(payload), "response.completed") + + payload, err = lease.ReadMessageContext(context.Background()) + require.NoError(t, err) + require.Contains(t, string(payload), "response.completed") + + payload, err = conn.readMessageWithTimeout(100 * time.Millisecond) + require.NoError(t, err) + require.Contains(t, string(payload), "response.completed") + + require.Equal(t, "value", conn.handshakeHeader(" X-Test ")) + require.NotZero(t, conn.createdAt()) + require.NotZero(t, conn.lastUsedAt()) + require.GreaterOrEqual(t, conn.age(time.Now()), time.Duration(0)) + require.GreaterOrEqual(t, conn.idleDuration(time.Now()), time.Duration(0)) + require.False(t, conn.isLeased()) + + // 覆盖空上下文路径 + _, err = conn.readMessage(nil) + require.NoError(t, err) + + // 覆盖 nil 保护分支 + var nilConn *openAIWSConn + require.ErrorIs(t, nilConn.writeJSONWithTimeout(context.Background(), map[string]any{}, time.Second), errOpenAIWSConnClosed) + _, err = nilConn.readMessageWithTimeout(10 * time.Millisecond) + require.ErrorIs(t, err, errOpenAIWSConnClosed) + _, err = nilConn.readMessageWithContextTimeout(context.Background(), 10*time.Millisecond) + require.ErrorIs(t, err, errOpenAIWSConnClosed) +} + +func TestOpenAIWSConnPool_PickOldestIdleAndAccountPoolLoad(t *testing.T) { + pool := &openAIWSConnPool{} + accountID := int64(404) + ap := &openAIWSAccountPool{conns: map[string]*openAIWSConn{}} + + idleOld := newOpenAIWSConn("idle_old", 
accountID, &openAIWSFakeConn{}, nil) + idleOld.lastUsedNano.Store(time.Now().Add(-10 * time.Minute).UnixNano()) + idleNew := newOpenAIWSConn("idle_new", accountID, &openAIWSFakeConn{}, nil) + idleNew.lastUsedNano.Store(time.Now().Add(-1 * time.Minute).UnixNano()) + leased := newOpenAIWSConn("leased", accountID, &openAIWSFakeConn{}, nil) + require.True(t, leased.tryAcquire()) + leased.waiters.Store(2) + + ap.conns[idleOld.id] = idleOld + ap.conns[idleNew.id] = idleNew + ap.conns[leased.id] = leased + + oldest := pool.pickOldestIdleConnLocked(ap) + require.NotNil(t, oldest) + require.Equal(t, idleOld.id, oldest.id) + + inflight, waiters := accountPoolLoadLocked(ap) + require.Equal(t, 1, inflight) + require.Equal(t, 2, waiters) + + pool.accounts.Store(accountID, ap) + loadInflight, loadWaiters, conns := pool.AccountPoolLoad(accountID) + require.Equal(t, 1, loadInflight) + require.Equal(t, 2, loadWaiters) + require.Equal(t, 3, conns) + + zeroInflight, zeroWaiters, zeroConns := pool.AccountPoolLoad(0) + require.Equal(t, 0, zeroInflight) + require.Equal(t, 0, zeroWaiters) + require.Equal(t, 0, zeroConns) +} + +func TestOpenAIWSConnPool_Close_WaitsWorkerGroupAndNilStopChannel(t *testing.T) { + pool := &openAIWSConnPool{} + release := make(chan struct{}) + pool.workerWg.Add(1) + go func() { + defer pool.workerWg.Done() + <-release + }() + + closed := make(chan struct{}) + go func() { + pool.Close() + close(closed) + }() + + select { + case <-closed: + t.Fatal("Close 不应在 WaitGroup 未完成时提前返回") + case <-time.After(30 * time.Millisecond): + } + + close(release) + select { + case <-closed: + case <-time.After(time.Second): + t.Fatal("Close 未等待 workerWg 完成") + } +} + +func TestOpenAIWSConnPool_Close_ClosesOnlyIdleConnections(t *testing.T) { + pool := &openAIWSConnPool{ + workerStopCh: make(chan struct{}), + } + + accountID := int64(606) + ap := &openAIWSAccountPool{ + conns: map[string]*openAIWSConn{}, + } + idle := newOpenAIWSConn("idle_conn", accountID, &openAIWSFakeConn{}, 
nil) + leased := newOpenAIWSConn("leased_conn", accountID, &openAIWSFakeConn{}, nil) + require.True(t, leased.tryAcquire()) + + ap.conns[idle.id] = idle + ap.conns[leased.id] = leased + pool.accounts.Store(accountID, ap) + pool.accounts.Store("invalid-key", "invalid-value") + + pool.Close() + + select { + case <-idle.closedCh: + // idle should be closed + default: + t.Fatal("空闲连接应在 Close 时被关闭") + } + + select { + case <-leased.closedCh: + t.Fatal("已租赁连接不应在 Close 时被关闭") + default: + } + + leased.release() + pool.Close() +} + +func TestOpenAIWSConnPool_RunBackgroundPingSweep_ConcurrencyLimit(t *testing.T) { + cfg := &config.Config{} + pool := newOpenAIWSConnPool(cfg) + accountID := int64(505) + ap := pool.getOrCreateAccountPool(accountID) + + var current atomic.Int32 + var maxConcurrent atomic.Int32 + release := make(chan struct{}) + for i := 0; i < 25; i++ { + conn := newOpenAIWSConn(pool.nextConnID(accountID), accountID, &openAIWSPingBlockingConn{ + current: ¤t, + maxConcurrent: &maxConcurrent, + release: release, + }, nil) + ap.mu.Lock() + ap.conns[conn.id] = conn + ap.mu.Unlock() + } + + done := make(chan struct{}) + go func() { + pool.runBackgroundPingSweep() + close(done) + }() + + require.Eventually(t, func() bool { + return maxConcurrent.Load() >= 10 + }, time.Second, 10*time.Millisecond) + + close(release) + select { + case <-done: + case <-time.After(2 * time.Second): + t.Fatal("runBackgroundPingSweep 未在释放后完成") + } + + require.LessOrEqual(t, maxConcurrent.Load(), int32(10)) +} + +func TestOpenAIWSConnLease_BasicGetterBranches(t *testing.T) { + var nilLease *openAIWSConnLease + require.Equal(t, "", nilLease.ConnID()) + require.Equal(t, time.Duration(0), nilLease.QueueWaitDuration()) + require.Equal(t, time.Duration(0), nilLease.ConnPickDuration()) + require.False(t, nilLease.Reused()) + require.Equal(t, "", nilLease.HandshakeHeader("x-test")) + require.False(t, nilLease.IsPrewarmed()) + nilLease.MarkPrewarmed() + nilLease.Release() + + conn := 
newOpenAIWSConn("getter_conn", 1, &openAIWSFakeConn{}, http.Header{"X-Test": []string{"ok"}}) + lease := &openAIWSConnLease{ + conn: conn, + queueWait: 3 * time.Millisecond, + connPick: 4 * time.Millisecond, + reused: true, + } + require.Equal(t, "getter_conn", lease.ConnID()) + require.Equal(t, 3*time.Millisecond, lease.QueueWaitDuration()) + require.Equal(t, 4*time.Millisecond, lease.ConnPickDuration()) + require.True(t, lease.Reused()) + require.Equal(t, "ok", lease.HandshakeHeader("x-test")) + require.False(t, lease.IsPrewarmed()) + lease.MarkPrewarmed() + require.True(t, lease.IsPrewarmed()) + lease.Release() +} + +func TestOpenAIWSConnPool_UtilityBranches(t *testing.T) { + var nilPool *openAIWSConnPool + require.Equal(t, OpenAIWSPoolMetricsSnapshot{}, nilPool.SnapshotMetrics()) + require.Equal(t, OpenAIWSTransportMetricsSnapshot{}, nilPool.SnapshotTransportMetrics()) + + pool := &openAIWSConnPool{cfg: &config.Config{}} + pool.metrics.acquireTotal.Store(7) + pool.metrics.acquireReuseTotal.Store(3) + metrics := pool.SnapshotMetrics() + require.Equal(t, int64(7), metrics.AcquireTotal) + require.Equal(t, int64(3), metrics.AcquireReuseTotal) + + // 非 transport metrics dialer 路径 + pool.clientDialer = &openAIWSFakeDialer{} + require.Equal(t, OpenAIWSTransportMetricsSnapshot{}, pool.SnapshotTransportMetrics()) + pool.setClientDialerForTest(nil) + require.NotNil(t, pool.clientDialer) + + require.Equal(t, 8, nilPool.maxConnsHardCap()) + require.False(t, nilPool.dynamicMaxConnsEnabled()) + require.Equal(t, 1.0, nilPool.maxConnsFactorByAccount(nil)) + require.Equal(t, 0, nilPool.minIdlePerAccount()) + require.Equal(t, 4, nilPool.maxIdlePerAccount()) + require.Equal(t, 256, nilPool.queueLimitPerConn()) + require.Equal(t, 0.7, nilPool.targetUtilization()) + require.Equal(t, time.Duration(0), nilPool.prewarmCooldown()) + require.Equal(t, 10*time.Second, nilPool.dialTimeout()) + + // shouldSuppressPrewarmLocked 覆盖 3 条分支 + now := time.Now() + apNilFail := 
&openAIWSAccountPool{prewarmFails: 1} + require.False(t, pool.shouldSuppressPrewarmLocked(apNilFail, now)) + apZeroTime := &openAIWSAccountPool{prewarmFails: 2} + require.False(t, pool.shouldSuppressPrewarmLocked(apZeroTime, now)) + require.Equal(t, 0, apZeroTime.prewarmFails) + apOldFail := &openAIWSAccountPool{prewarmFails: 2, prewarmFailAt: now.Add(-openAIWSPrewarmFailureWindow - time.Second)} + require.False(t, pool.shouldSuppressPrewarmLocked(apOldFail, now)) + apRecentFail := &openAIWSAccountPool{prewarmFails: openAIWSPrewarmFailureSuppress, prewarmFailAt: now} + require.True(t, pool.shouldSuppressPrewarmLocked(apRecentFail, now)) + + // recordConnPickDuration 的保护分支 + nilPool.recordConnPickDuration(10 * time.Millisecond) + pool.recordConnPickDuration(-10 * time.Millisecond) + require.Equal(t, int64(1), pool.metrics.connPickTotal.Load()) + + // account pool 读写分支 + require.Nil(t, nilPool.getOrCreateAccountPool(1)) + require.Nil(t, pool.getOrCreateAccountPool(0)) + pool.accounts.Store(int64(7), "invalid") + ap := pool.getOrCreateAccountPool(7) + require.NotNil(t, ap) + _, ok := pool.getAccountPool(0) + require.False(t, ok) + _, ok = pool.getAccountPool(12345) + require.False(t, ok) + pool.accounts.Store(int64(8), "bad-type") + _, ok = pool.getAccountPool(8) + require.False(t, ok) + + // health check 条件 + require.False(t, pool.shouldHealthCheckConn(nil)) + conn := newOpenAIWSConn("health", 1, &openAIWSFakeConn{}, nil) + conn.lastUsedNano.Store(time.Now().Add(-openAIWSConnHealthCheckIdle - time.Second).UnixNano()) + require.True(t, pool.shouldHealthCheckConn(conn)) +} + +func TestOpenAIWSConn_LeaseAndTimeHelpers_NilAndClosedBranches(t *testing.T) { + var nilConn *openAIWSConn + nilConn.touch() + require.Equal(t, time.Time{}, nilConn.createdAt()) + require.Equal(t, time.Time{}, nilConn.lastUsedAt()) + require.Equal(t, time.Duration(0), nilConn.idleDuration(time.Now())) + require.Equal(t, time.Duration(0), nilConn.age(time.Now())) + require.False(t, 
nilConn.isLeased()) + require.False(t, nilConn.isPrewarmed()) + nilConn.markPrewarmed() + + conn := newOpenAIWSConn("lease_state", 1, &openAIWSFakeConn{}, nil) + require.True(t, conn.tryAcquire()) + require.True(t, conn.isLeased()) + conn.release() + require.False(t, conn.isLeased()) + conn.close() + require.False(t, conn.tryAcquire()) + + ctx, cancel := context.WithCancel(context.Background()) + cancel() + err := conn.acquire(ctx) + require.Error(t, err) +} + +func TestOpenAIWSConnLease_ReadWriteNilConnBranches(t *testing.T) { + lease := &openAIWSConnLease{} + require.ErrorIs(t, lease.WriteJSON(map[string]any{"k": "v"}, time.Second), errOpenAIWSConnClosed) + require.ErrorIs(t, lease.WriteJSONContext(context.Background(), map[string]any{"k": "v"}), errOpenAIWSConnClosed) + _, err := lease.ReadMessage(10 * time.Millisecond) + require.ErrorIs(t, err, errOpenAIWSConnClosed) + _, err = lease.ReadMessageContext(context.Background()) + require.ErrorIs(t, err, errOpenAIWSConnClosed) + _, err = lease.ReadMessageWithContextTimeout(context.Background(), 10*time.Millisecond) + require.ErrorIs(t, err, errOpenAIWSConnClosed) +} + +func TestOpenAIWSConn_AdditionalGuardBranches(t *testing.T) { + var nilConn *openAIWSConn + require.False(t, nilConn.tryAcquire()) + require.ErrorIs(t, nilConn.acquire(context.Background()), errOpenAIWSConnClosed) + nilConn.release() + nilConn.close() + require.Equal(t, "", nilConn.handshakeHeader("x-test")) + + connBusy := newOpenAIWSConn("busy_ctx", 1, &openAIWSFakeConn{}, nil) + require.True(t, connBusy.tryAcquire()) + ctx, cancel := context.WithCancel(context.Background()) + cancel() + require.ErrorIs(t, connBusy.acquire(ctx), context.Canceled) + connBusy.release() + + connClosed := newOpenAIWSConn("closed_guard", 1, &openAIWSFakeConn{}, nil) + connClosed.close() + require.ErrorIs( + t, + connClosed.writeJSONWithTimeout(context.Background(), map[string]any{"k": "v"}, time.Second), + errOpenAIWSConnClosed, + ) + _, err := 
connClosed.readMessageWithContextTimeout(context.Background(), time.Second) + require.ErrorIs(t, err, errOpenAIWSConnClosed) + require.ErrorIs(t, connClosed.pingWithTimeout(time.Second), errOpenAIWSConnClosed) + + connNoWS := newOpenAIWSConn("no_ws", 1, nil, nil) + require.ErrorIs(t, connNoWS.writeJSON(map[string]any{"k": "v"}, context.Background()), errOpenAIWSConnClosed) + _, err = connNoWS.readMessage(context.Background()) + require.ErrorIs(t, err, errOpenAIWSConnClosed) + require.ErrorIs(t, connNoWS.pingWithTimeout(time.Second), errOpenAIWSConnClosed) + require.Equal(t, "", connNoWS.handshakeHeader("x-test")) + + connOK := newOpenAIWSConn("ok", 1, &openAIWSFakeConn{}, nil) + require.NoError(t, connOK.writeJSON(map[string]any{"k": "v"}, nil)) + _, err = connOK.readMessageWithContextTimeout(nil, 0) + require.NoError(t, err) + require.NoError(t, connOK.pingWithTimeout(0)) + + connZero := newOpenAIWSConn("zero_ts", 1, &openAIWSFakeConn{}, nil) + connZero.createdAtNano.Store(0) + connZero.lastUsedNano.Store(0) + require.True(t, connZero.createdAt().IsZero()) + require.True(t, connZero.lastUsedAt().IsZero()) + require.Equal(t, time.Duration(0), connZero.idleDuration(time.Now())) + require.Equal(t, time.Duration(0), connZero.age(time.Now())) + + require.Nil(t, cloneOpenAIWSAcquireRequestPtr(nil)) + copied := cloneHeader(http.Header{ + "X-Empty": []string{}, + "X-Test": []string{"v1"}, + }) + require.Contains(t, copied, "X-Empty") + require.Nil(t, copied["X-Empty"]) + require.Equal(t, "v1", copied.Get("X-Test")) + + closeOpenAIWSConns([]*openAIWSConn{nil, connOK}) +} + +func TestOpenAIWSConnLease_MarkBrokenEvictsConn(t *testing.T) { + pool := newOpenAIWSConnPool(&config.Config{}) + accountID := int64(5001) + conn := newOpenAIWSConn("broken_me", accountID, &openAIWSFakeConn{}, nil) + ap := pool.getOrCreateAccountPool(accountID) + ap.mu.Lock() + ap.conns[conn.id] = conn + ap.mu.Unlock() + + lease := &openAIWSConnLease{ + pool: pool, + accountID: accountID, + conn: conn, 
+ } + lease.MarkBroken() + + ap.mu.Lock() + _, exists := ap.conns[conn.id] + ap.mu.Unlock() + require.False(t, exists) + require.False(t, conn.tryAcquire(), "被标记为 broken 的连接应被关闭") +} + +func TestOpenAIWSConnPool_TargetConnCountAndPrewarmBranches(t *testing.T) { + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + pool := newOpenAIWSConnPool(cfg) + + require.Equal(t, 0, pool.targetConnCountLocked(nil, 1)) + ap := &openAIWSAccountPool{conns: map[string]*openAIWSConn{}} + require.Equal(t, 0, pool.targetConnCountLocked(ap, 0)) + + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 3 + require.Equal(t, 1, pool.targetConnCountLocked(ap, 1), "minIdle 应被 maxConns 截断") + + // 覆盖 waiters>0 且 target 需要至少 len(conns)+1 的分支 + cfg.Gateway.OpenAIWS.MinIdlePerAccount = 0 + cfg.Gateway.OpenAIWS.PoolTargetUtilization = 0.9 + busy := newOpenAIWSConn("busy_target", 2, &openAIWSFakeConn{}, nil) + require.True(t, busy.tryAcquire()) + busy.waiters.Store(1) + ap.conns[busy.id] = busy + target := pool.targetConnCountLocked(ap, 4) + require.GreaterOrEqual(t, target, len(ap.conns)+1) + + // prewarm: account pool 缺失时,拨号后的连接应被关闭并提前返回 + req := openAIWSAcquireRequest{ + Account: &Account{ID: 999, Platform: PlatformOpenAI, Type: AccountTypeAPIKey}, + WSURL: "wss://example.com/v1/responses", + } + pool.prewarmConns(999, req, 1) + + // prewarm: 拨号失败分支(prewarmFails 累加) + accountID := int64(1000) + failPool := newOpenAIWSConnPool(cfg) + failPool.setClientDialerForTest(&openAIWSAlwaysFailDialer{}) + apFail := failPool.getOrCreateAccountPool(accountID) + apFail.mu.Lock() + apFail.creating = 1 + apFail.mu.Unlock() + req.Account.ID = accountID + failPool.prewarmConns(accountID, req, 1) + apFail.mu.Lock() + require.GreaterOrEqual(t, apFail.prewarmFails, 1) + apFail.mu.Unlock() +} + +func TestOpenAIWSConnPool_Acquire_ErrorBranches(t *testing.T) { + var nilPool *openAIWSConnPool + _, err := nilPool.Acquire(context.Background(), openAIWSAcquireRequest{}) + require.Error(t, err) + + pool := 
newOpenAIWSConnPool(&config.Config{}) + _, err = pool.Acquire(context.Background(), openAIWSAcquireRequest{ + Account: &Account{ID: 1}, + WSURL: " ", + }) + require.Error(t, err) + require.Contains(t, err.Error(), "ws url is empty") + + // target=nil 分支:池满且仅有 nil 连接 + cfg := &config.Config{} + cfg.Gateway.OpenAIWS.MaxConnsPerAccount = 1 + cfg.Gateway.OpenAIWS.QueueLimitPerConn = 1 + fullPool := newOpenAIWSConnPool(cfg) + account := &Account{ID: 2001, Platform: PlatformOpenAI, Type: AccountTypeAPIKey} + ap := fullPool.getOrCreateAccountPool(account.ID) + ap.mu.Lock() + ap.conns["nil"] = nil + ap.lastCleanupAt = time.Now() + ap.mu.Unlock() + _, err = fullPool.Acquire(context.Background(), openAIWSAcquireRequest{ + Account: account, + WSURL: "wss://example.com/v1/responses", + }) + require.ErrorIs(t, err, errOpenAIWSConnClosed) + + // queue full 分支:waiters 达上限 + account2 := &Account{ID: 2002, Platform: PlatformOpenAI, Type: AccountTypeAPIKey} + ap2 := fullPool.getOrCreateAccountPool(account2.ID) + conn := newOpenAIWSConn("queue_full", account2.ID, &openAIWSFakeConn{}, nil) + require.True(t, conn.tryAcquire()) + conn.waiters.Store(1) + ap2.mu.Lock() + ap2.conns[conn.id] = conn + ap2.lastCleanupAt = time.Now() + ap2.mu.Unlock() + _, err = fullPool.Acquire(context.Background(), openAIWSAcquireRequest{ + Account: account2, + WSURL: "wss://example.com/v1/responses", + }) + require.ErrorIs(t, err, errOpenAIWSConnQueueFull) +} + type openAIWSFakeDialer struct{} func (d *openAIWSFakeDialer) Dial( @@ -660,6 +1162,46 @@ type openAIWSAlwaysFailDialer struct { dialCount int } +type openAIWSPingBlockingConn struct { + current *atomic.Int32 + maxConcurrent *atomic.Int32 + release <-chan struct{} +} + +func (c *openAIWSPingBlockingConn) WriteJSON(context.Context, any) error { + return nil +} + +func (c *openAIWSPingBlockingConn) ReadMessage(context.Context) ([]byte, error) { + return []byte(`{"type":"response.completed","response":{"id":"resp_blocking_ping"}}`), nil +} + +func (c 
*openAIWSPingBlockingConn) Ping(ctx context.Context) error { + if c.current == nil || c.maxConcurrent == nil { + return nil + } + + now := c.current.Add(1) + for { + prev := c.maxConcurrent.Load() + if now <= prev || c.maxConcurrent.CompareAndSwap(prev, now) { + break + } + } + defer c.current.Add(-1) + + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.release: + return nil + } +} + +func (c *openAIWSPingBlockingConn) Close() error { + return nil +} + func (d *openAIWSCountingDialer) Dial( ctx context.Context, wsURL string, From 13d3dc2b22ebe685f95dfa9a97ce328c88b2b1c7 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Fri, 27 Feb 2026 08:35:42 +0800 Subject: [PATCH 033/120] =?UTF-8?q?feat(backend):=20=E5=AE=8C=E6=88=90?= =?UTF-8?q?=E7=83=AD=E8=B7=AF=E5=BE=84=E6=80=A7=E8=83=BD=E4=BC=98=E5=8C=96?= =?UTF-8?q?=E4=B8=8E=E5=B9=82=E7=AD=89=E8=83=BD=E5=8A=9B=E6=94=B9=E9=80=A0?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- backend/Makefile | 11 +- backend/cmd/server/wire.go | 50 +- backend/cmd/server/wire_gen.go | 46 +- backend/ent/account.go | 32 +- backend/ent/account/account.go | 16 + backend/ent/account/where.go | 135 ++ backend/ent/account_create.go | 156 +++ backend/ent/account_update.go | 104 ++ backend/ent/client.go | 171 ++- backend/ent/ent.go | 2 + backend/ent/hook/hook.go | 12 + backend/ent/idempotencyrecord.go | 228 ++++ .../idempotencyrecord/idempotencyrecord.go | 148 ++ backend/ent/idempotencyrecord/where.go | 755 ++++++++++ backend/ent/idempotencyrecord_create.go | 1132 +++++++++++++++ backend/ent/idempotencyrecord_delete.go | 88 ++ backend/ent/idempotencyrecord_query.go | 564 ++++++++ backend/ent/idempotencyrecord_update.go | 676 +++++++++ backend/ent/intercept/intercept.go | 30 + backend/ent/migrate/schema.go | 68 +- backend/ent/mutation.go | 1216 ++++++++++++++++- backend/ent/predicate/predicate.go | 3 + backend/ent/runtime/runtime.go | 38 +- backend/ent/schema/account.go | 16 + 
backend/ent/schema/usage_log.go | 2 + backend/ent/schema/user_subscription.go | 2 + backend/ent/tx.go | 3 + backend/go.mod | 2 + backend/go.sum | 12 + backend/internal/config/config.go | 9 + backend/internal/config/config_test.go | 25 +- .../internal/handler/admin/account_handler.go | 28 + backend/internal/handler/failover_loop.go | 32 +- backend/internal/handler/gateway_handler.go | 67 +- backend/internal/handler/gateway_helper.go | 50 +- .../handler/gateway_helper_fastpath_test.go | 8 + .../handler/gateway_helper_hotpath_test.go | 56 +- .../internal/handler/gemini_v1beta_handler.go | 14 +- .../handler/openai_gateway_handler.go | 33 +- .../handler/openai_gateway_handler_test.go | 5 +- backend/internal/handler/ops_error_logger.go | 32 +- .../internal/handler/ops_error_logger_test.go | 20 + .../internal/handler/sora_gateway_handler.go | 4 +- backend/internal/pkg/errors/errors_test.go | 15 + backend/internal/pkg/errors/http.go | 14 +- backend/internal/pkg/httpclient/pool.go | 53 +- backend/internal/pkg/httpclient/pool_test.go | 115 ++ backend/internal/pkg/httputil/body.go | 37 + backend/internal/pkg/ip/ip.go | 72 +- backend/internal/pkg/ip/ip_test.go | 21 + backend/internal/pkg/logger/logger.go | 77 +- backend/internal/pkg/logger/slog_handler.go | 9 +- .../internal/pkg/logger/stdlog_bridge_test.go | 1 + backend/internal/pkg/tlsfingerprint/dialer.go | 16 +- backend/internal/repository/account_repo.go | 116 +- .../account_repo_integration_test.go | 32 + .../internal/repository/concurrency_cache.go | 37 + backend/internal/repository/group_repo.go | 70 +- .../repository/group_repo_integration_test.go | 75 + .../internal/repository/migrations_runner.go | 97 +- .../repository/migrations_runner_notx_test.go | 164 +++ backend/internal/repository/usage_log_repo.go | 104 +- .../server/middleware/api_key_auth.go | 2 +- .../server/middleware/api_key_auth_google.go | 24 +- .../middleware/api_key_auth_google_test.go | 170 +++ .../server/middleware/security_headers.go | 16 + 
.../middleware/security_headers_test.go | 20 + backend/internal/server/routes/admin.go | 1 + .../internal/service/account_usage_service.go | 78 ++ .../service/antigravity_gateway_service.go | 3 +- backend/internal/service/api_key.go | 19 +- .../service/api_key_auth_cache_impl.go | 1 + backend/internal/service/api_key_service.go | 15 + .../internal/service/billing_cache_service.go | 81 +- ...billing_cache_service_singleflight_test.go | 115 ++ .../service/billing_cache_service_test.go | 13 + .../internal/service/claude_code_validator.go | 2 +- .../internal/service/concurrency_service.go | 50 +- .../service/concurrency_service_test.go | 53 +- .../service/gateway_multiplatform_test.go | 8 + backend/internal/service/gateway_service.go | 169 ++- .../service/gemini_messages_compat_service.go | 10 +- backend/internal/service/model_rate_limit.go | 4 +- .../service/openai_account_scheduler.go | 33 +- .../service/openai_gateway_service.go | 230 +++- .../openai_gateway_service_hotpath_test.go | 16 + .../service/openai_gateway_service_test.go | 25 +- .../internal/service/openai_sticky_compat.go | 214 +++ .../service/openai_sticky_compat_test.go | 96 ++ .../internal/service/openai_ws_forwarder.go | 154 ++- .../openai_ws_forwarder_success_test.go | 15 +- backend/internal/service/openai_ws_pool.go | 31 +- backend/internal/service/ops_retry.go | 3 +- backend/internal/service/request_metadata.go | 216 +++ .../internal/service/request_metadata_test.go | 119 ++ .../service/response_header_filter.go | 13 + backend/internal/util/logredact/redact.go | 79 +- .../internal/util/logredact/redact_test.go | 45 + .../util/responseheaders/responseheaders.go | 28 +- .../responseheaders/responseheaders_test.go | 4 +- ...duler_and_usage_composite_indexes_notx.sql | 15 + backend/migrations/README.md | 20 + 102 files changed, 8762 insertions(+), 674 deletions(-) create mode 100644 backend/ent/idempotencyrecord.go create mode 100644 backend/ent/idempotencyrecord/idempotencyrecord.go create mode 100644 
backend/ent/idempotencyrecord/where.go create mode 100644 backend/ent/idempotencyrecord_create.go create mode 100644 backend/ent/idempotencyrecord_delete.go create mode 100644 backend/ent/idempotencyrecord_query.go create mode 100644 backend/ent/idempotencyrecord_update.go create mode 100644 backend/internal/pkg/httpclient/pool_test.go create mode 100644 backend/internal/pkg/httputil/body.go create mode 100644 backend/internal/repository/migrations_runner_notx_test.go create mode 100644 backend/internal/service/billing_cache_service_singleflight_test.go create mode 100644 backend/internal/service/openai_sticky_compat.go create mode 100644 backend/internal/service/openai_sticky_compat_test.go create mode 100644 backend/internal/service/request_metadata.go create mode 100644 backend/internal/service/request_metadata_test.go create mode 100644 backend/internal/service/response_header_filter.go create mode 100644 backend/migrations/062_add_scheduler_and_usage_composite_indexes_notx.sql diff --git a/backend/Makefile b/backend/Makefile index 89db11041..7084ccb93 100644 --- a/backend/Makefile +++ b/backend/Makefile @@ -1,7 +1,14 @@ -.PHONY: build test test-unit test-integration test-e2e +.PHONY: build generate test test-unit test-integration test-e2e + +VERSION ?= $(shell tr -d '\r\n' < ./cmd/server/VERSION) +LDFLAGS ?= -s -w -X main.Version=$(VERSION) build: - go build -o bin/server ./cmd/server + CGO_ENABLED=0 go build -ldflags="$(LDFLAGS)" -trimpath -o bin/server ./cmd/server + +generate: + go generate ./ent + go generate ./cmd/server test: go test ./... 
diff --git a/backend/cmd/server/wire.go b/backend/cmd/server/wire.go index f448cd730..cbf89ba3b 100644 --- a/backend/cmd/server/wire.go +++ b/backend/cmd/server/wire.go @@ -7,6 +7,7 @@ import ( "context" "log" "net/http" + "sync" "time" "github.com/Wei-Shaw/sub2api/ent" @@ -90,11 +91,13 @@ func provideCleanup( ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - // Cleanup steps in reverse dependency order - cleanupSteps := []struct { + type cleanupStep struct { name string fn func() error - }{ + } + + // 应用层清理步骤可并行执行,基础设施资源(Redis/Ent)最后按顺序关闭。 + parallelSteps := []cleanupStep{ {"OpsScheduledReportService", func() error { if opsScheduledReport != nil { opsScheduledReport.Stop() @@ -213,23 +216,54 @@ func provideCleanup( } return nil }}, + } + + infraSteps := []cleanupStep{ {"Redis", func() error { + if rdb == nil { + return nil + } return rdb.Close() }}, {"Ent", func() error { + if entClient == nil { + return nil + } return entClient.Close() }}, } - for _, step := range cleanupSteps { - if err := step.fn(); err != nil { - log.Printf("[Cleanup] %s failed: %v", step.name, err) - // Continue with remaining cleanup steps even if one fails - } else { + runParallel := func(steps []cleanupStep) { + var wg sync.WaitGroup + for i := range steps { + step := steps[i] + wg.Add(1) + go func() { + defer wg.Done() + if err := step.fn(); err != nil { + log.Printf("[Cleanup] %s failed: %v", step.name, err) + return + } + log.Printf("[Cleanup] %s succeeded", step.name) + }() + } + wg.Wait() + } + + runSequential := func(steps []cleanupStep) { + for i := range steps { + step := steps[i] + if err := step.fn(); err != nil { + log.Printf("[Cleanup] %s failed: %v", step.name, err) + continue + } log.Printf("[Cleanup] %s succeeded", step.name) } } + runParallel(parallelSteps) + runSequential(infraSteps) + // Check if context timed out select { case <-ctx.Done(): diff --git a/backend/cmd/server/wire_gen.go b/backend/cmd/server/wire_gen.go index 
045fec477..46eeec460 100644 --- a/backend/cmd/server/wire_gen.go +++ b/backend/cmd/server/wire_gen.go @@ -19,6 +19,7 @@ import ( "github.com/redis/go-redis/v9" "log" "net/http" + "sync" "time" ) @@ -265,10 +266,12 @@ func provideCleanup( ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) defer cancel() - cleanupSteps := []struct { + type cleanupStep struct { name string fn func() error - }{ + } + + parallelSteps := []cleanupStep{ {"OpsScheduledReportService", func() error { if opsScheduledReport != nil { opsScheduledReport.Stop() @@ -387,23 +390,54 @@ func provideCleanup( } return nil }}, + } + + infraSteps := []cleanupStep{ {"Redis", func() error { + if rdb == nil { + return nil + } return rdb.Close() }}, {"Ent", func() error { + if entClient == nil { + return nil + } return entClient.Close() }}, } - for _, step := range cleanupSteps { - if err := step.fn(); err != nil { - log.Printf("[Cleanup] %s failed: %v", step.name, err) + runParallel := func(steps []cleanupStep) { + var wg sync.WaitGroup + for i := range steps { + step := steps[i] + wg.Add(1) + go func() { + defer wg.Done() + if err := step.fn(); err != nil { + log.Printf("[Cleanup] %s failed: %v", step.name, err) + return + } + log.Printf("[Cleanup] %s succeeded", step.name) + }() + } + wg.Wait() + } - } else { + runSequential := func(steps []cleanupStep) { + for i := range steps { + step := steps[i] + if err := step.fn(); err != nil { + log.Printf("[Cleanup] %s failed: %v", step.name, err) + continue + } log.Printf("[Cleanup] %s succeeded", step.name) } } + runParallel(parallelSteps) + runSequential(infraSteps) + select { case <-ctx.Done(): log.Printf("[Cleanup] Warning: cleanup timed out after 10 seconds") diff --git a/backend/ent/account.go b/backend/ent/account.go index 038aa7e59..c77002b32 100644 --- a/backend/ent/account.go +++ b/backend/ent/account.go @@ -63,6 +63,10 @@ type Account struct { RateLimitResetAt *time.Time `json:"rate_limit_reset_at,omitempty"` // OverloadUntil 
holds the value of the "overload_until" field. OverloadUntil *time.Time `json:"overload_until,omitempty"` + // TempUnschedulableUntil holds the value of the "temp_unschedulable_until" field. + TempUnschedulableUntil *time.Time `json:"temp_unschedulable_until,omitempty"` + // TempUnschedulableReason holds the value of the "temp_unschedulable_reason" field. + TempUnschedulableReason *string `json:"temp_unschedulable_reason,omitempty"` // SessionWindowStart holds the value of the "session_window_start" field. SessionWindowStart *time.Time `json:"session_window_start,omitempty"` // SessionWindowEnd holds the value of the "session_window_end" field. @@ -141,9 +145,9 @@ func (*Account) scanValues(columns []string) ([]any, error) { values[i] = new(sql.NullFloat64) case account.FieldID, account.FieldProxyID, account.FieldConcurrency, account.FieldPriority: values[i] = new(sql.NullInt64) - case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldSessionWindowStatus: + case account.FieldName, account.FieldNotes, account.FieldPlatform, account.FieldType, account.FieldStatus, account.FieldErrorMessage, account.FieldTempUnschedulableReason, account.FieldSessionWindowStatus: values[i] = new(sql.NullString) - case account.FieldCreatedAt, account.FieldUpdatedAt, account.FieldDeletedAt, account.FieldLastUsedAt, account.FieldExpiresAt, account.FieldRateLimitedAt, account.FieldRateLimitResetAt, account.FieldOverloadUntil, account.FieldSessionWindowStart, account.FieldSessionWindowEnd: + case account.FieldCreatedAt, account.FieldUpdatedAt, account.FieldDeletedAt, account.FieldLastUsedAt, account.FieldExpiresAt, account.FieldRateLimitedAt, account.FieldRateLimitResetAt, account.FieldOverloadUntil, account.FieldTempUnschedulableUntil, account.FieldSessionWindowStart, account.FieldSessionWindowEnd: values[i] = new(sql.NullTime) default: values[i] = new(sql.UnknownType) @@ -311,6 +315,20 @@ func (_m 
*Account) assignValues(columns []string, values []any) error { _m.OverloadUntil = new(time.Time) *_m.OverloadUntil = value.Time } + case account.FieldTempUnschedulableUntil: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field temp_unschedulable_until", values[i]) + } else if value.Valid { + _m.TempUnschedulableUntil = new(time.Time) + *_m.TempUnschedulableUntil = value.Time + } + case account.FieldTempUnschedulableReason: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field temp_unschedulable_reason", values[i]) + } else if value.Valid { + _m.TempUnschedulableReason = new(string) + *_m.TempUnschedulableReason = value.String + } case account.FieldSessionWindowStart: if value, ok := values[i].(*sql.NullTime); !ok { return fmt.Errorf("unexpected type %T for field session_window_start", values[i]) @@ -472,6 +490,16 @@ func (_m *Account) String() string { builder.WriteString(v.Format(time.ANSIC)) } builder.WriteString(", ") + if v := _m.TempUnschedulableUntil; v != nil { + builder.WriteString("temp_unschedulable_until=") + builder.WriteString(v.Format(time.ANSIC)) + } + builder.WriteString(", ") + if v := _m.TempUnschedulableReason; v != nil { + builder.WriteString("temp_unschedulable_reason=") + builder.WriteString(*v) + } + builder.WriteString(", ") if v := _m.SessionWindowStart; v != nil { builder.WriteString("session_window_start=") builder.WriteString(v.Format(time.ANSIC)) diff --git a/backend/ent/account/account.go b/backend/ent/account/account.go index 73c0e8c25..1fc34620d 100644 --- a/backend/ent/account/account.go +++ b/backend/ent/account/account.go @@ -59,6 +59,10 @@ const ( FieldRateLimitResetAt = "rate_limit_reset_at" // FieldOverloadUntil holds the string denoting the overload_until field in the database. FieldOverloadUntil = "overload_until" + // FieldTempUnschedulableUntil holds the string denoting the temp_unschedulable_until field in the database. 
+ FieldTempUnschedulableUntil = "temp_unschedulable_until" + // FieldTempUnschedulableReason holds the string denoting the temp_unschedulable_reason field in the database. + FieldTempUnschedulableReason = "temp_unschedulable_reason" // FieldSessionWindowStart holds the string denoting the session_window_start field in the database. FieldSessionWindowStart = "session_window_start" // FieldSessionWindowEnd holds the string denoting the session_window_end field in the database. @@ -128,6 +132,8 @@ var Columns = []string{ FieldRateLimitedAt, FieldRateLimitResetAt, FieldOverloadUntil, + FieldTempUnschedulableUntil, + FieldTempUnschedulableReason, FieldSessionWindowStart, FieldSessionWindowEnd, FieldSessionWindowStatus, @@ -299,6 +305,16 @@ func ByOverloadUntil(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldOverloadUntil, opts...).ToFunc() } +// ByTempUnschedulableUntil orders the results by the temp_unschedulable_until field. +func ByTempUnschedulableUntil(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTempUnschedulableUntil, opts...).ToFunc() +} + +// ByTempUnschedulableReason orders the results by the temp_unschedulable_reason field. +func ByTempUnschedulableReason(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldTempUnschedulableReason, opts...).ToFunc() +} + // BySessionWindowStart orders the results by the session_window_start field. func BySessionWindowStart(opts ...sql.OrderTermOption) OrderOption { return sql.OrderByField(FieldSessionWindowStart, opts...).ToFunc() diff --git a/backend/ent/account/where.go b/backend/ent/account/where.go index dea1127a2..54db1dcb1 100644 --- a/backend/ent/account/where.go +++ b/backend/ent/account/where.go @@ -155,6 +155,16 @@ func OverloadUntil(v time.Time) predicate.Account { return predicate.Account(sql.FieldEQ(FieldOverloadUntil, v)) } +// TempUnschedulableUntil applies equality check predicate on the "temp_unschedulable_until" field. 
It's identical to TempUnschedulableUntilEQ. +func TempUnschedulableUntil(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldTempUnschedulableUntil, v)) +} + +// TempUnschedulableReason applies equality check predicate on the "temp_unschedulable_reason" field. It's identical to TempUnschedulableReasonEQ. +func TempUnschedulableReason(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldTempUnschedulableReason, v)) +} + // SessionWindowStart applies equality check predicate on the "session_window_start" field. It's identical to SessionWindowStartEQ. func SessionWindowStart(v time.Time) predicate.Account { return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v)) @@ -1130,6 +1140,131 @@ func OverloadUntilNotNil() predicate.Account { return predicate.Account(sql.FieldNotNull(FieldOverloadUntil)) } +// TempUnschedulableUntilEQ applies the EQ predicate on the "temp_unschedulable_until" field. +func TempUnschedulableUntilEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldTempUnschedulableUntil, v)) +} + +// TempUnschedulableUntilNEQ applies the NEQ predicate on the "temp_unschedulable_until" field. +func TempUnschedulableUntilNEQ(v time.Time) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldTempUnschedulableUntil, v)) +} + +// TempUnschedulableUntilIn applies the In predicate on the "temp_unschedulable_until" field. +func TempUnschedulableUntilIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldIn(FieldTempUnschedulableUntil, vs...)) +} + +// TempUnschedulableUntilNotIn applies the NotIn predicate on the "temp_unschedulable_until" field. +func TempUnschedulableUntilNotIn(vs ...time.Time) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldTempUnschedulableUntil, vs...)) +} + +// TempUnschedulableUntilGT applies the GT predicate on the "temp_unschedulable_until" field. 
+func TempUnschedulableUntilGT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGT(FieldTempUnschedulableUntil, v)) +} + +// TempUnschedulableUntilGTE applies the GTE predicate on the "temp_unschedulable_until" field. +func TempUnschedulableUntilGTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldTempUnschedulableUntil, v)) +} + +// TempUnschedulableUntilLT applies the LT predicate on the "temp_unschedulable_until" field. +func TempUnschedulableUntilLT(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLT(FieldTempUnschedulableUntil, v)) +} + +// TempUnschedulableUntilLTE applies the LTE predicate on the "temp_unschedulable_until" field. +func TempUnschedulableUntilLTE(v time.Time) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldTempUnschedulableUntil, v)) +} + +// TempUnschedulableUntilIsNil applies the IsNil predicate on the "temp_unschedulable_until" field. +func TempUnschedulableUntilIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldTempUnschedulableUntil)) +} + +// TempUnschedulableUntilNotNil applies the NotNil predicate on the "temp_unschedulable_until" field. +func TempUnschedulableUntilNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldTempUnschedulableUntil)) +} + +// TempUnschedulableReasonEQ applies the EQ predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonEQ(v string) predicate.Account { + return predicate.Account(sql.FieldEQ(FieldTempUnschedulableReason, v)) +} + +// TempUnschedulableReasonNEQ applies the NEQ predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonNEQ(v string) predicate.Account { + return predicate.Account(sql.FieldNEQ(FieldTempUnschedulableReason, v)) +} + +// TempUnschedulableReasonIn applies the In predicate on the "temp_unschedulable_reason" field. 
+func TempUnschedulableReasonIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldIn(FieldTempUnschedulableReason, vs...)) +} + +// TempUnschedulableReasonNotIn applies the NotIn predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonNotIn(vs ...string) predicate.Account { + return predicate.Account(sql.FieldNotIn(FieldTempUnschedulableReason, vs...)) +} + +// TempUnschedulableReasonGT applies the GT predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonGT(v string) predicate.Account { + return predicate.Account(sql.FieldGT(FieldTempUnschedulableReason, v)) +} + +// TempUnschedulableReasonGTE applies the GTE predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonGTE(v string) predicate.Account { + return predicate.Account(sql.FieldGTE(FieldTempUnschedulableReason, v)) +} + +// TempUnschedulableReasonLT applies the LT predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonLT(v string) predicate.Account { + return predicate.Account(sql.FieldLT(FieldTempUnschedulableReason, v)) +} + +// TempUnschedulableReasonLTE applies the LTE predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonLTE(v string) predicate.Account { + return predicate.Account(sql.FieldLTE(FieldTempUnschedulableReason, v)) +} + +// TempUnschedulableReasonContains applies the Contains predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonContains(v string) predicate.Account { + return predicate.Account(sql.FieldContains(FieldTempUnschedulableReason, v)) +} + +// TempUnschedulableReasonHasPrefix applies the HasPrefix predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonHasPrefix(v string) predicate.Account { + return predicate.Account(sql.FieldHasPrefix(FieldTempUnschedulableReason, v)) +} + +// TempUnschedulableReasonHasSuffix applies the HasSuffix predicate on the "temp_unschedulable_reason" field. 
+func TempUnschedulableReasonHasSuffix(v string) predicate.Account { + return predicate.Account(sql.FieldHasSuffix(FieldTempUnschedulableReason, v)) +} + +// TempUnschedulableReasonIsNil applies the IsNil predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonIsNil() predicate.Account { + return predicate.Account(sql.FieldIsNull(FieldTempUnschedulableReason)) +} + +// TempUnschedulableReasonNotNil applies the NotNil predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonNotNil() predicate.Account { + return predicate.Account(sql.FieldNotNull(FieldTempUnschedulableReason)) +} + +// TempUnschedulableReasonEqualFold applies the EqualFold predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonEqualFold(v string) predicate.Account { + return predicate.Account(sql.FieldEqualFold(FieldTempUnschedulableReason, v)) +} + +// TempUnschedulableReasonContainsFold applies the ContainsFold predicate on the "temp_unschedulable_reason" field. +func TempUnschedulableReasonContainsFold(v string) predicate.Account { + return predicate.Account(sql.FieldContainsFold(FieldTempUnschedulableReason, v)) +} + // SessionWindowStartEQ applies the EQ predicate on the "session_window_start" field. func SessionWindowStartEQ(v time.Time) predicate.Account { return predicate.Account(sql.FieldEQ(FieldSessionWindowStart, v)) diff --git a/backend/ent/account_create.go b/backend/ent/account_create.go index 42a561cf0..963ffee88 100644 --- a/backend/ent/account_create.go +++ b/backend/ent/account_create.go @@ -293,6 +293,34 @@ func (_c *AccountCreate) SetNillableOverloadUntil(v *time.Time) *AccountCreate { return _c } +// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field. 
+func (_c *AccountCreate) SetTempUnschedulableUntil(v time.Time) *AccountCreate { + _c.mutation.SetTempUnschedulableUntil(v) + return _c +} + +// SetNillableTempUnschedulableUntil sets the "temp_unschedulable_until" field if the given value is not nil. +func (_c *AccountCreate) SetNillableTempUnschedulableUntil(v *time.Time) *AccountCreate { + if v != nil { + _c.SetTempUnschedulableUntil(*v) + } + return _c +} + +// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field. +func (_c *AccountCreate) SetTempUnschedulableReason(v string) *AccountCreate { + _c.mutation.SetTempUnschedulableReason(v) + return _c +} + +// SetNillableTempUnschedulableReason sets the "temp_unschedulable_reason" field if the given value is not nil. +func (_c *AccountCreate) SetNillableTempUnschedulableReason(v *string) *AccountCreate { + if v != nil { + _c.SetTempUnschedulableReason(*v) + } + return _c +} + // SetSessionWindowStart sets the "session_window_start" field. func (_c *AccountCreate) SetSessionWindowStart(v time.Time) *AccountCreate { _c.mutation.SetSessionWindowStart(v) @@ -639,6 +667,14 @@ func (_c *AccountCreate) createSpec() (*Account, *sqlgraph.CreateSpec) { _spec.SetField(account.FieldOverloadUntil, field.TypeTime, value) _node.OverloadUntil = &value } + if value, ok := _c.mutation.TempUnschedulableUntil(); ok { + _spec.SetField(account.FieldTempUnschedulableUntil, field.TypeTime, value) + _node.TempUnschedulableUntil = &value + } + if value, ok := _c.mutation.TempUnschedulableReason(); ok { + _spec.SetField(account.FieldTempUnschedulableReason, field.TypeString, value) + _node.TempUnschedulableReason = &value + } if value, ok := _c.mutation.SessionWindowStart(); ok { _spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value) _node.SessionWindowStart = &value @@ -1080,6 +1116,42 @@ func (u *AccountUpsert) ClearOverloadUntil() *AccountUpsert { return u } +// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field. 
+func (u *AccountUpsert) SetTempUnschedulableUntil(v time.Time) *AccountUpsert { + u.Set(account.FieldTempUnschedulableUntil, v) + return u +} + +// UpdateTempUnschedulableUntil sets the "temp_unschedulable_until" field to the value that was provided on create. +func (u *AccountUpsert) UpdateTempUnschedulableUntil() *AccountUpsert { + u.SetExcluded(account.FieldTempUnschedulableUntil) + return u +} + +// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field. +func (u *AccountUpsert) ClearTempUnschedulableUntil() *AccountUpsert { + u.SetNull(account.FieldTempUnschedulableUntil) + return u +} + +// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field. +func (u *AccountUpsert) SetTempUnschedulableReason(v string) *AccountUpsert { + u.Set(account.FieldTempUnschedulableReason, v) + return u +} + +// UpdateTempUnschedulableReason sets the "temp_unschedulable_reason" field to the value that was provided on create. +func (u *AccountUpsert) UpdateTempUnschedulableReason() *AccountUpsert { + u.SetExcluded(account.FieldTempUnschedulableReason) + return u +} + +// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field. +func (u *AccountUpsert) ClearTempUnschedulableReason() *AccountUpsert { + u.SetNull(account.FieldTempUnschedulableReason) + return u +} + // SetSessionWindowStart sets the "session_window_start" field. func (u *AccountUpsert) SetSessionWindowStart(v time.Time) *AccountUpsert { u.Set(account.FieldSessionWindowStart, v) @@ -1557,6 +1629,48 @@ func (u *AccountUpsertOne) ClearOverloadUntil() *AccountUpsertOne { }) } +// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field. +func (u *AccountUpsertOne) SetTempUnschedulableUntil(v time.Time) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetTempUnschedulableUntil(v) + }) +} + +// UpdateTempUnschedulableUntil sets the "temp_unschedulable_until" field to the value that was provided on create. 
+func (u *AccountUpsertOne) UpdateTempUnschedulableUntil() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateTempUnschedulableUntil() + }) +} + +// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field. +func (u *AccountUpsertOne) ClearTempUnschedulableUntil() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.ClearTempUnschedulableUntil() + }) +} + +// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field. +func (u *AccountUpsertOne) SetTempUnschedulableReason(v string) *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.SetTempUnschedulableReason(v) + }) +} + +// UpdateTempUnschedulableReason sets the "temp_unschedulable_reason" field to the value that was provided on create. +func (u *AccountUpsertOne) UpdateTempUnschedulableReason() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.UpdateTempUnschedulableReason() + }) +} + +// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field. +func (u *AccountUpsertOne) ClearTempUnschedulableReason() *AccountUpsertOne { + return u.Update(func(s *AccountUpsert) { + s.ClearTempUnschedulableReason() + }) +} + // SetSessionWindowStart sets the "session_window_start" field. func (u *AccountUpsertOne) SetSessionWindowStart(v time.Time) *AccountUpsertOne { return u.Update(func(s *AccountUpsert) { @@ -2209,6 +2323,48 @@ func (u *AccountUpsertBulk) ClearOverloadUntil() *AccountUpsertBulk { }) } +// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field. +func (u *AccountUpsertBulk) SetTempUnschedulableUntil(v time.Time) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetTempUnschedulableUntil(v) + }) +} + +// UpdateTempUnschedulableUntil sets the "temp_unschedulable_until" field to the value that was provided on create. 
+func (u *AccountUpsertBulk) UpdateTempUnschedulableUntil() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateTempUnschedulableUntil() + }) +} + +// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field. +func (u *AccountUpsertBulk) ClearTempUnschedulableUntil() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearTempUnschedulableUntil() + }) +} + +// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field. +func (u *AccountUpsertBulk) SetTempUnschedulableReason(v string) *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.SetTempUnschedulableReason(v) + }) +} + +// UpdateTempUnschedulableReason sets the "temp_unschedulable_reason" field to the value that was provided on create. +func (u *AccountUpsertBulk) UpdateTempUnschedulableReason() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.UpdateTempUnschedulableReason() + }) +} + +// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field. +func (u *AccountUpsertBulk) ClearTempUnschedulableReason() *AccountUpsertBulk { + return u.Update(func(s *AccountUpsert) { + s.ClearTempUnschedulableReason() + }) +} + // SetSessionWindowStart sets the "session_window_start" field. func (u *AccountUpsertBulk) SetSessionWindowStart(v time.Time) *AccountUpsertBulk { return u.Update(func(s *AccountUpsert) { diff --git a/backend/ent/account_update.go b/backend/ent/account_update.go index 63fab096d..875888e04 100644 --- a/backend/ent/account_update.go +++ b/backend/ent/account_update.go @@ -376,6 +376,46 @@ func (_u *AccountUpdate) ClearOverloadUntil() *AccountUpdate { return _u } +// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field. 
+func (_u *AccountUpdate) SetTempUnschedulableUntil(v time.Time) *AccountUpdate { + _u.mutation.SetTempUnschedulableUntil(v) + return _u +} + +// SetNillableTempUnschedulableUntil sets the "temp_unschedulable_until" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableTempUnschedulableUntil(v *time.Time) *AccountUpdate { + if v != nil { + _u.SetTempUnschedulableUntil(*v) + } + return _u +} + +// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field. +func (_u *AccountUpdate) ClearTempUnschedulableUntil() *AccountUpdate { + _u.mutation.ClearTempUnschedulableUntil() + return _u +} + +// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field. +func (_u *AccountUpdate) SetTempUnschedulableReason(v string) *AccountUpdate { + _u.mutation.SetTempUnschedulableReason(v) + return _u +} + +// SetNillableTempUnschedulableReason sets the "temp_unschedulable_reason" field if the given value is not nil. +func (_u *AccountUpdate) SetNillableTempUnschedulableReason(v *string) *AccountUpdate { + if v != nil { + _u.SetTempUnschedulableReason(*v) + } + return _u +} + +// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field. +func (_u *AccountUpdate) ClearTempUnschedulableReason() *AccountUpdate { + _u.mutation.ClearTempUnschedulableReason() + return _u +} + // SetSessionWindowStart sets the "session_window_start" field. 
func (_u *AccountUpdate) SetSessionWindowStart(v time.Time) *AccountUpdate { _u.mutation.SetSessionWindowStart(v) @@ -701,6 +741,18 @@ func (_u *AccountUpdate) sqlSave(ctx context.Context) (_node int, err error) { if _u.mutation.OverloadUntilCleared() { _spec.ClearField(account.FieldOverloadUntil, field.TypeTime) } + if value, ok := _u.mutation.TempUnschedulableUntil(); ok { + _spec.SetField(account.FieldTempUnschedulableUntil, field.TypeTime, value) + } + if _u.mutation.TempUnschedulableUntilCleared() { + _spec.ClearField(account.FieldTempUnschedulableUntil, field.TypeTime) + } + if value, ok := _u.mutation.TempUnschedulableReason(); ok { + _spec.SetField(account.FieldTempUnschedulableReason, field.TypeString, value) + } + if _u.mutation.TempUnschedulableReasonCleared() { + _spec.ClearField(account.FieldTempUnschedulableReason, field.TypeString) + } if value, ok := _u.mutation.SessionWindowStart(); ok { _spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value) } @@ -1215,6 +1267,46 @@ func (_u *AccountUpdateOne) ClearOverloadUntil() *AccountUpdateOne { return _u } +// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field. +func (_u *AccountUpdateOne) SetTempUnschedulableUntil(v time.Time) *AccountUpdateOne { + _u.mutation.SetTempUnschedulableUntil(v) + return _u +} + +// SetNillableTempUnschedulableUntil sets the "temp_unschedulable_until" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableTempUnschedulableUntil(v *time.Time) *AccountUpdateOne { + if v != nil { + _u.SetTempUnschedulableUntil(*v) + } + return _u +} + +// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field. +func (_u *AccountUpdateOne) ClearTempUnschedulableUntil() *AccountUpdateOne { + _u.mutation.ClearTempUnschedulableUntil() + return _u +} + +// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field. 
+func (_u *AccountUpdateOne) SetTempUnschedulableReason(v string) *AccountUpdateOne { + _u.mutation.SetTempUnschedulableReason(v) + return _u +} + +// SetNillableTempUnschedulableReason sets the "temp_unschedulable_reason" field if the given value is not nil. +func (_u *AccountUpdateOne) SetNillableTempUnschedulableReason(v *string) *AccountUpdateOne { + if v != nil { + _u.SetTempUnschedulableReason(*v) + } + return _u +} + +// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field. +func (_u *AccountUpdateOne) ClearTempUnschedulableReason() *AccountUpdateOne { + _u.mutation.ClearTempUnschedulableReason() + return _u +} + // SetSessionWindowStart sets the "session_window_start" field. func (_u *AccountUpdateOne) SetSessionWindowStart(v time.Time) *AccountUpdateOne { _u.mutation.SetSessionWindowStart(v) @@ -1570,6 +1662,18 @@ func (_u *AccountUpdateOne) sqlSave(ctx context.Context) (_node *Account, err er if _u.mutation.OverloadUntilCleared() { _spec.ClearField(account.FieldOverloadUntil, field.TypeTime) } + if value, ok := _u.mutation.TempUnschedulableUntil(); ok { + _spec.SetField(account.FieldTempUnschedulableUntil, field.TypeTime, value) + } + if _u.mutation.TempUnschedulableUntilCleared() { + _spec.ClearField(account.FieldTempUnschedulableUntil, field.TypeTime) + } + if value, ok := _u.mutation.TempUnschedulableReason(); ok { + _spec.SetField(account.FieldTempUnschedulableReason, field.TypeString, value) + } + if _u.mutation.TempUnschedulableReasonCleared() { + _spec.ClearField(account.FieldTempUnschedulableReason, field.TypeString) + } if value, ok := _u.mutation.SessionWindowStart(); ok { _spec.SetField(account.FieldSessionWindowStart, field.TypeTime, value) } diff --git a/backend/ent/client.go b/backend/ent/client.go index 504c17557..7ebbaa322 100644 --- a/backend/ent/client.go +++ b/backend/ent/client.go @@ -22,6 +22,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" 
"github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" "github.com/Wei-Shaw/sub2api/ent/promocode" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" @@ -58,6 +59,8 @@ type Client struct { ErrorPassthroughRule *ErrorPassthroughRuleClient // Group is the client for interacting with the Group builders. Group *GroupClient + // IdempotencyRecord is the client for interacting with the IdempotencyRecord builders. + IdempotencyRecord *IdempotencyRecordClient // PromoCode is the client for interacting with the PromoCode builders. PromoCode *PromoCodeClient // PromoCodeUsage is the client for interacting with the PromoCodeUsage builders. @@ -102,6 +105,7 @@ func (c *Client) init() { c.AnnouncementRead = NewAnnouncementReadClient(c.config) c.ErrorPassthroughRule = NewErrorPassthroughRuleClient(c.config) c.Group = NewGroupClient(c.config) + c.IdempotencyRecord = NewIdempotencyRecordClient(c.config) c.PromoCode = NewPromoCodeClient(c.config) c.PromoCodeUsage = NewPromoCodeUsageClient(c.config) c.Proxy = NewProxyClient(c.config) @@ -214,6 +218,7 @@ func (c *Client) Tx(ctx context.Context) (*Tx, error) { AnnouncementRead: NewAnnouncementReadClient(cfg), ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), Group: NewGroupClient(cfg), + IdempotencyRecord: NewIdempotencyRecordClient(cfg), PromoCode: NewPromoCodeClient(cfg), PromoCodeUsage: NewPromoCodeUsageClient(cfg), Proxy: NewProxyClient(cfg), @@ -253,6 +258,7 @@ func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) AnnouncementRead: NewAnnouncementReadClient(cfg), ErrorPassthroughRule: NewErrorPassthroughRuleClient(cfg), Group: NewGroupClient(cfg), + IdempotencyRecord: NewIdempotencyRecordClient(cfg), PromoCode: NewPromoCodeClient(cfg), PromoCodeUsage: NewPromoCodeUsageClient(cfg), Proxy: NewProxyClient(cfg), @@ -296,10 +302,10 @@ func (c *Client) Close() error { func (c *Client) Use(hooks ...Hook) { for _, n := range 
[]interface{ Use(...Hook) }{ c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, - c.ErrorPassthroughRule, c.Group, c.PromoCode, c.PromoCodeUsage, c.Proxy, - c.RedeemCode, c.SecuritySecret, c.Setting, c.UsageCleanupTask, c.UsageLog, - c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, - c.UserSubscription, + c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, c.PromoCode, + c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, + c.UsageCleanupTask, c.UsageLog, c.User, c.UserAllowedGroup, + c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Use(hooks...) } @@ -310,10 +316,10 @@ func (c *Client) Use(hooks ...Hook) { func (c *Client) Intercept(interceptors ...Interceptor) { for _, n := range []interface{ Intercept(...Interceptor) }{ c.APIKey, c.Account, c.AccountGroup, c.Announcement, c.AnnouncementRead, - c.ErrorPassthroughRule, c.Group, c.PromoCode, c.PromoCodeUsage, c.Proxy, - c.RedeemCode, c.SecuritySecret, c.Setting, c.UsageCleanupTask, c.UsageLog, - c.User, c.UserAllowedGroup, c.UserAttributeDefinition, c.UserAttributeValue, - c.UserSubscription, + c.ErrorPassthroughRule, c.Group, c.IdempotencyRecord, c.PromoCode, + c.PromoCodeUsage, c.Proxy, c.RedeemCode, c.SecuritySecret, c.Setting, + c.UsageCleanupTask, c.UsageLog, c.User, c.UserAllowedGroup, + c.UserAttributeDefinition, c.UserAttributeValue, c.UserSubscription, } { n.Intercept(interceptors...) 
} @@ -336,6 +342,8 @@ func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { return c.ErrorPassthroughRule.mutate(ctx, m) case *GroupMutation: return c.Group.mutate(ctx, m) + case *IdempotencyRecordMutation: + return c.IdempotencyRecord.mutate(ctx, m) case *PromoCodeMutation: return c.PromoCode.mutate(ctx, m) case *PromoCodeUsageMutation: @@ -1575,6 +1583,139 @@ func (c *GroupClient) mutate(ctx context.Context, m *GroupMutation) (Value, erro } } +// IdempotencyRecordClient is a client for the IdempotencyRecord schema. +type IdempotencyRecordClient struct { + config +} + +// NewIdempotencyRecordClient returns a client for the IdempotencyRecord from the given config. +func NewIdempotencyRecordClient(c config) *IdempotencyRecordClient { + return &IdempotencyRecordClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `idempotencyrecord.Hooks(f(g(h())))`. +func (c *IdempotencyRecordClient) Use(hooks ...Hook) { + c.hooks.IdempotencyRecord = append(c.hooks.IdempotencyRecord, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `idempotencyrecord.Intercept(f(g(h())))`. +func (c *IdempotencyRecordClient) Intercept(interceptors ...Interceptor) { + c.inters.IdempotencyRecord = append(c.inters.IdempotencyRecord, interceptors...) +} + +// Create returns a builder for creating a IdempotencyRecord entity. +func (c *IdempotencyRecordClient) Create() *IdempotencyRecordCreate { + mutation := newIdempotencyRecordMutation(c.config, OpCreate) + return &IdempotencyRecordCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of IdempotencyRecord entities. 
+func (c *IdempotencyRecordClient) CreateBulk(builders ...*IdempotencyRecordCreate) *IdempotencyRecordCreateBulk { + return &IdempotencyRecordCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *IdempotencyRecordClient) MapCreateBulk(slice any, setFunc func(*IdempotencyRecordCreate, int)) *IdempotencyRecordCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &IdempotencyRecordCreateBulk{err: fmt.Errorf("calling to IdempotencyRecordClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*IdempotencyRecordCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &IdempotencyRecordCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for IdempotencyRecord. +func (c *IdempotencyRecordClient) Update() *IdempotencyRecordUpdate { + mutation := newIdempotencyRecordMutation(c.config, OpUpdate) + return &IdempotencyRecordUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *IdempotencyRecordClient) UpdateOne(_m *IdempotencyRecord) *IdempotencyRecordUpdateOne { + mutation := newIdempotencyRecordMutation(c.config, OpUpdateOne, withIdempotencyRecord(_m)) + return &IdempotencyRecordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *IdempotencyRecordClient) UpdateOneID(id int64) *IdempotencyRecordUpdateOne { + mutation := newIdempotencyRecordMutation(c.config, OpUpdateOne, withIdempotencyRecordID(id)) + return &IdempotencyRecordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for IdempotencyRecord. 
+func (c *IdempotencyRecordClient) Delete() *IdempotencyRecordDelete { + mutation := newIdempotencyRecordMutation(c.config, OpDelete) + return &IdempotencyRecordDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *IdempotencyRecordClient) DeleteOne(_m *IdempotencyRecord) *IdempotencyRecordDeleteOne { + return c.DeleteOneID(_m.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *IdempotencyRecordClient) DeleteOneID(id int64) *IdempotencyRecordDeleteOne { + builder := c.Delete().Where(idempotencyrecord.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &IdempotencyRecordDeleteOne{builder} +} + +// Query returns a query builder for IdempotencyRecord. +func (c *IdempotencyRecordClient) Query() *IdempotencyRecordQuery { + return &IdempotencyRecordQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeIdempotencyRecord}, + inters: c.Interceptors(), + } +} + +// Get returns a IdempotencyRecord entity by its id. +func (c *IdempotencyRecordClient) Get(ctx context.Context, id int64) (*IdempotencyRecord, error) { + return c.Query().Where(idempotencyrecord.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *IdempotencyRecordClient) GetX(ctx context.Context, id int64) *IdempotencyRecord { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *IdempotencyRecordClient) Hooks() []Hook { + return c.hooks.IdempotencyRecord +} + +// Interceptors returns the client interceptors. 
+func (c *IdempotencyRecordClient) Interceptors() []Interceptor { + return c.inters.IdempotencyRecord +} + +func (c *IdempotencyRecordClient) mutate(ctx context.Context, m *IdempotencyRecordMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&IdempotencyRecordCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&IdempotencyRecordUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&IdempotencyRecordUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&IdempotencyRecordDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown IdempotencyRecord mutation op: %q", m.Op()) + } +} + // PromoCodeClient is a client for the PromoCode schema. type PromoCodeClient struct { config @@ -3747,15 +3888,17 @@ func (c *UserSubscriptionClient) mutate(ctx context.Context, m *UserSubscription type ( hooks struct { APIKey, Account, AccountGroup, Announcement, AnnouncementRead, - ErrorPassthroughRule, Group, PromoCode, PromoCodeUsage, Proxy, RedeemCode, - SecuritySecret, Setting, UsageCleanupTask, UsageLog, User, UserAllowedGroup, - UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Hook + ErrorPassthroughRule, Group, IdempotencyRecord, PromoCode, PromoCodeUsage, + Proxy, RedeemCode, SecuritySecret, Setting, UsageCleanupTask, UsageLog, User, + UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Hook } inters struct { APIKey, Account, AccountGroup, Announcement, AnnouncementRead, - ErrorPassthroughRule, Group, PromoCode, PromoCodeUsage, Proxy, RedeemCode, - SecuritySecret, Setting, UsageCleanupTask, UsageLog, User, UserAllowedGroup, - UserAttributeDefinition, UserAttributeValue, UserSubscription []ent.Interceptor + ErrorPassthroughRule, Group, IdempotencyRecord, PromoCode, PromoCodeUsage, + Proxy, RedeemCode, 
SecuritySecret, Setting, UsageCleanupTask, UsageLog, User, + UserAllowedGroup, UserAttributeDefinition, UserAttributeValue, + UserSubscription []ent.Interceptor } ) diff --git a/backend/ent/ent.go b/backend/ent/ent.go index c4ec33873..5197e4d84 100644 --- a/backend/ent/ent.go +++ b/backend/ent/ent.go @@ -19,6 +19,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" "github.com/Wei-Shaw/sub2api/ent/promocode" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" @@ -99,6 +100,7 @@ func checkColumn(t, c string) error { announcementread.Table: announcementread.ValidColumn, errorpassthroughrule.Table: errorpassthroughrule.ValidColumn, group.Table: group.ValidColumn, + idempotencyrecord.Table: idempotencyrecord.ValidColumn, promocode.Table: promocode.ValidColumn, promocodeusage.Table: promocodeusage.ValidColumn, proxy.Table: proxy.ValidColumn, diff --git a/backend/ent/hook/hook.go b/backend/ent/hook/hook.go index aff9caa02..49d7f3c55 100644 --- a/backend/ent/hook/hook.go +++ b/backend/ent/hook/hook.go @@ -93,6 +93,18 @@ func (f GroupFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.GroupMutation", m) } +// The IdempotencyRecordFunc type is an adapter to allow the use of ordinary +// function as IdempotencyRecord mutator. +type IdempotencyRecordFunc func(context.Context, *ent.IdempotencyRecordMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f IdempotencyRecordFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.IdempotencyRecordMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.IdempotencyRecordMutation", m) +} + // The PromoCodeFunc type is an adapter to allow the use of ordinary // function as PromoCode mutator. type PromoCodeFunc func(context.Context, *ent.PromoCodeMutation) (ent.Value, error) diff --git a/backend/ent/idempotencyrecord.go b/backend/ent/idempotencyrecord.go new file mode 100644 index 000000000..ab120f8f8 --- /dev/null +++ b/backend/ent/idempotencyrecord.go @@ -0,0 +1,228 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" +) + +// IdempotencyRecord is the model entity for the IdempotencyRecord schema. +type IdempotencyRecord struct { + config `json:"-"` + // ID of the ent. + ID int64 `json:"id,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Scope holds the value of the "scope" field. + Scope string `json:"scope,omitempty"` + // IdempotencyKeyHash holds the value of the "idempotency_key_hash" field. + IdempotencyKeyHash string `json:"idempotency_key_hash,omitempty"` + // RequestFingerprint holds the value of the "request_fingerprint" field. + RequestFingerprint string `json:"request_fingerprint,omitempty"` + // Status holds the value of the "status" field. + Status string `json:"status,omitempty"` + // ResponseStatus holds the value of the "response_status" field. + ResponseStatus *int `json:"response_status,omitempty"` + // ResponseBody holds the value of the "response_body" field. + ResponseBody *string `json:"response_body,omitempty"` + // ErrorReason holds the value of the "error_reason" field. + ErrorReason *string `json:"error_reason,omitempty"` + // LockedUntil holds the value of the "locked_until" field. 
+ LockedUntil *time.Time `json:"locked_until,omitempty"` + // ExpiresAt holds the value of the "expires_at" field. + ExpiresAt time.Time `json:"expires_at,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*IdempotencyRecord) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case idempotencyrecord.FieldID, idempotencyrecord.FieldResponseStatus: + values[i] = new(sql.NullInt64) + case idempotencyrecord.FieldScope, idempotencyrecord.FieldIdempotencyKeyHash, idempotencyrecord.FieldRequestFingerprint, idempotencyrecord.FieldStatus, idempotencyrecord.FieldResponseBody, idempotencyrecord.FieldErrorReason: + values[i] = new(sql.NullString) + case idempotencyrecord.FieldCreatedAt, idempotencyrecord.FieldUpdatedAt, idempotencyrecord.FieldLockedUntil, idempotencyrecord.FieldExpiresAt: + values[i] = new(sql.NullTime) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the IdempotencyRecord fields. 
+func (_m *IdempotencyRecord) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case idempotencyrecord.FieldID: + value, ok := values[i].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + _m.ID = int64(value.Int64) + case idempotencyrecord.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + _m.CreatedAt = value.Time + } + case idempotencyrecord.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + _m.UpdatedAt = value.Time + } + case idempotencyrecord.FieldScope: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field scope", values[i]) + } else if value.Valid { + _m.Scope = value.String + } + case idempotencyrecord.FieldIdempotencyKeyHash: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field idempotency_key_hash", values[i]) + } else if value.Valid { + _m.IdempotencyKeyHash = value.String + } + case idempotencyrecord.FieldRequestFingerprint: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field request_fingerprint", values[i]) + } else if value.Valid { + _m.RequestFingerprint = value.String + } + case idempotencyrecord.FieldStatus: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field status", values[i]) + } else if value.Valid { + _m.Status = value.String + } + case idempotencyrecord.FieldResponseStatus: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field response_status", 
values[i]) + } else if value.Valid { + _m.ResponseStatus = new(int) + *_m.ResponseStatus = int(value.Int64) + } + case idempotencyrecord.FieldResponseBody: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field response_body", values[i]) + } else if value.Valid { + _m.ResponseBody = new(string) + *_m.ResponseBody = value.String + } + case idempotencyrecord.FieldErrorReason: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field error_reason", values[i]) + } else if value.Valid { + _m.ErrorReason = new(string) + *_m.ErrorReason = value.String + } + case idempotencyrecord.FieldLockedUntil: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field locked_until", values[i]) + } else if value.Valid { + _m.LockedUntil = new(time.Time) + *_m.LockedUntil = value.Time + } + case idempotencyrecord.FieldExpiresAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field expires_at", values[i]) + } else if value.Valid { + _m.ExpiresAt = value.Time + } + default: + _m.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the IdempotencyRecord. +// This includes values selected through modifiers, order, etc. +func (_m *IdempotencyRecord) Value(name string) (ent.Value, error) { + return _m.selectValues.Get(name) +} + +// Update returns a builder for updating this IdempotencyRecord. +// Note that you need to call IdempotencyRecord.Unwrap() before calling this method if this IdempotencyRecord +// was returned from a transaction, and the transaction was committed or rolled back. 
+func (_m *IdempotencyRecord) Update() *IdempotencyRecordUpdateOne { + return NewIdempotencyRecordClient(_m.config).UpdateOne(_m) +} + +// Unwrap unwraps the IdempotencyRecord entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (_m *IdempotencyRecord) Unwrap() *IdempotencyRecord { + _tx, ok := _m.config.driver.(*txDriver) + if !ok { + panic("ent: IdempotencyRecord is not a transactional entity") + } + _m.config.driver = _tx.drv + return _m +} + +// String implements the fmt.Stringer. +func (_m *IdempotencyRecord) String() string { + var builder strings.Builder + builder.WriteString("IdempotencyRecord(") + builder.WriteString(fmt.Sprintf("id=%v, ", _m.ID)) + builder.WriteString("created_at=") + builder.WriteString(_m.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(_m.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("scope=") + builder.WriteString(_m.Scope) + builder.WriteString(", ") + builder.WriteString("idempotency_key_hash=") + builder.WriteString(_m.IdempotencyKeyHash) + builder.WriteString(", ") + builder.WriteString("request_fingerprint=") + builder.WriteString(_m.RequestFingerprint) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(_m.Status) + builder.WriteString(", ") + if v := _m.ResponseStatus; v != nil { + builder.WriteString("response_status=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := _m.ResponseBody; v != nil { + builder.WriteString("response_body=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := _m.ErrorReason; v != nil { + builder.WriteString("error_reason=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := _m.LockedUntil; v != nil { + builder.WriteString("locked_until=") + builder.WriteString(v.Format(time.ANSIC)) + } 
+ builder.WriteString(", ") + builder.WriteString("expires_at=") + builder.WriteString(_m.ExpiresAt.Format(time.ANSIC)) + builder.WriteByte(')') + return builder.String() +} + +// IdempotencyRecords is a parsable slice of IdempotencyRecord. +type IdempotencyRecords []*IdempotencyRecord diff --git a/backend/ent/idempotencyrecord/idempotencyrecord.go b/backend/ent/idempotencyrecord/idempotencyrecord.go new file mode 100644 index 000000000..d9686f607 --- /dev/null +++ b/backend/ent/idempotencyrecord/idempotencyrecord.go @@ -0,0 +1,148 @@ +// Code generated by ent, DO NOT EDIT. + +package idempotencyrecord + +import ( + "time" + + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the idempotencyrecord type in the database. + Label = "idempotency_record" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldScope holds the string denoting the scope field in the database. + FieldScope = "scope" + // FieldIdempotencyKeyHash holds the string denoting the idempotency_key_hash field in the database. + FieldIdempotencyKeyHash = "idempotency_key_hash" + // FieldRequestFingerprint holds the string denoting the request_fingerprint field in the database. + FieldRequestFingerprint = "request_fingerprint" + // FieldStatus holds the string denoting the status field in the database. + FieldStatus = "status" + // FieldResponseStatus holds the string denoting the response_status field in the database. + FieldResponseStatus = "response_status" + // FieldResponseBody holds the string denoting the response_body field in the database. + FieldResponseBody = "response_body" + // FieldErrorReason holds the string denoting the error_reason field in the database. 
+ FieldErrorReason = "error_reason" + // FieldLockedUntil holds the string denoting the locked_until field in the database. + FieldLockedUntil = "locked_until" + // FieldExpiresAt holds the string denoting the expires_at field in the database. + FieldExpiresAt = "expires_at" + // Table holds the table name of the idempotencyrecord in the database. + Table = "idempotency_records" +) + +// Columns holds all SQL columns for idempotencyrecord fields. +var Columns = []string{ + FieldID, + FieldCreatedAt, + FieldUpdatedAt, + FieldScope, + FieldIdempotencyKeyHash, + FieldRequestFingerprint, + FieldStatus, + FieldResponseStatus, + FieldResponseBody, + FieldErrorReason, + FieldLockedUntil, + FieldExpiresAt, +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +var ( + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // ScopeValidator is a validator for the "scope" field. It is called by the builders before save. + ScopeValidator func(string) error + // IdempotencyKeyHashValidator is a validator for the "idempotency_key_hash" field. It is called by the builders before save. + IdempotencyKeyHashValidator func(string) error + // RequestFingerprintValidator is a validator for the "request_fingerprint" field. It is called by the builders before save. + RequestFingerprintValidator func(string) error + // StatusValidator is a validator for the "status" field. It is called by the builders before save. 
+ StatusValidator func(string) error + // ErrorReasonValidator is a validator for the "error_reason" field. It is called by the builders before save. + ErrorReasonValidator func(string) error +) + +// OrderOption defines the ordering options for the IdempotencyRecord queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByScope orders the results by the scope field. +func ByScope(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldScope, opts...).ToFunc() +} + +// ByIdempotencyKeyHash orders the results by the idempotency_key_hash field. +func ByIdempotencyKeyHash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIdempotencyKeyHash, opts...).ToFunc() +} + +// ByRequestFingerprint orders the results by the request_fingerprint field. +func ByRequestFingerprint(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldRequestFingerprint, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByResponseStatus orders the results by the response_status field. +func ByResponseStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldResponseStatus, opts...).ToFunc() +} + +// ByResponseBody orders the results by the response_body field. 
+func ByResponseBody(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldResponseBody, opts...).ToFunc() +} + +// ByErrorReason orders the results by the error_reason field. +func ByErrorReason(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldErrorReason, opts...).ToFunc() +} + +// ByLockedUntil orders the results by the locked_until field. +func ByLockedUntil(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldLockedUntil, opts...).ToFunc() +} + +// ByExpiresAt orders the results by the expires_at field. +func ByExpiresAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldExpiresAt, opts...).ToFunc() +} diff --git a/backend/ent/idempotencyrecord/where.go b/backend/ent/idempotencyrecord/where.go new file mode 100644 index 000000000..c3d8d9d5e --- /dev/null +++ b/backend/ent/idempotencyrecord/where.go @@ -0,0 +1,755 @@ +// Code generated by ent, DO NOT EDIT. + +package idempotencyrecord + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. 
+func IDNotIn(ids ...int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int64) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldID, id)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// Scope applies equality check predicate on the "scope" field. It's identical to ScopeEQ. +func Scope(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldScope, v)) +} + +// IdempotencyKeyHash applies equality check predicate on the "idempotency_key_hash" field. It's identical to IdempotencyKeyHashEQ. +func IdempotencyKeyHash(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldIdempotencyKeyHash, v)) +} + +// RequestFingerprint applies equality check predicate on the "request_fingerprint" field. It's identical to RequestFingerprintEQ. 
+func RequestFingerprint(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldRequestFingerprint, v)) +} + +// Status applies equality check predicate on the "status" field. It's identical to StatusEQ. +func Status(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldStatus, v)) +} + +// ResponseStatus applies equality check predicate on the "response_status" field. It's identical to ResponseStatusEQ. +func ResponseStatus(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseStatus, v)) +} + +// ResponseBody applies equality check predicate on the "response_body" field. It's identical to ResponseBodyEQ. +func ResponseBody(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseBody, v)) +} + +// ErrorReason applies equality check predicate on the "error_reason" field. It's identical to ErrorReasonEQ. +func ErrorReason(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldErrorReason, v)) +} + +// LockedUntil applies equality check predicate on the "locked_until" field. It's identical to LockedUntilEQ. +func LockedUntil(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldLockedUntil, v)) +} + +// ExpiresAt applies equality check predicate on the "expires_at" field. It's identical to ExpiresAtEQ. +func ExpiresAt(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldExpiresAt, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. 
+func CreatedAtNEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. 
+func UpdatedAtIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// ScopeEQ applies the EQ predicate on the "scope" field. +func ScopeEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldScope, v)) +} + +// ScopeNEQ applies the NEQ predicate on the "scope" field. +func ScopeNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldScope, v)) +} + +// ScopeIn applies the In predicate on the "scope" field. +func ScopeIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldScope, vs...)) +} + +// ScopeNotIn applies the NotIn predicate on the "scope" field. 
+func ScopeNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldScope, vs...)) +} + +// ScopeGT applies the GT predicate on the "scope" field. +func ScopeGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldScope, v)) +} + +// ScopeGTE applies the GTE predicate on the "scope" field. +func ScopeGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldScope, v)) +} + +// ScopeLT applies the LT predicate on the "scope" field. +func ScopeLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldScope, v)) +} + +// ScopeLTE applies the LTE predicate on the "scope" field. +func ScopeLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldScope, v)) +} + +// ScopeContains applies the Contains predicate on the "scope" field. +func ScopeContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldScope, v)) +} + +// ScopeHasPrefix applies the HasPrefix predicate on the "scope" field. +func ScopeHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldScope, v)) +} + +// ScopeHasSuffix applies the HasSuffix predicate on the "scope" field. +func ScopeHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldScope, v)) +} + +// ScopeEqualFold applies the EqualFold predicate on the "scope" field. +func ScopeEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldScope, v)) +} + +// ScopeContainsFold applies the ContainsFold predicate on the "scope" field. 
+func ScopeContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldScope, v)) +} + +// IdempotencyKeyHashEQ applies the EQ predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashNEQ applies the NEQ predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashIn applies the In predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldIdempotencyKeyHash, vs...)) +} + +// IdempotencyKeyHashNotIn applies the NotIn predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldIdempotencyKeyHash, vs...)) +} + +// IdempotencyKeyHashGT applies the GT predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashGTE applies the GTE predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashLT applies the LT predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashLTE applies the LTE predicate on the "idempotency_key_hash" field. 
+func IdempotencyKeyHashLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashContains applies the Contains predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashHasPrefix applies the HasPrefix predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashHasSuffix applies the HasSuffix predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashEqualFold applies the EqualFold predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldIdempotencyKeyHash, v)) +} + +// IdempotencyKeyHashContainsFold applies the ContainsFold predicate on the "idempotency_key_hash" field. +func IdempotencyKeyHashContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldIdempotencyKeyHash, v)) +} + +// RequestFingerprintEQ applies the EQ predicate on the "request_fingerprint" field. +func RequestFingerprintEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldRequestFingerprint, v)) +} + +// RequestFingerprintNEQ applies the NEQ predicate on the "request_fingerprint" field. 
+func RequestFingerprintNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldRequestFingerprint, v)) +} + +// RequestFingerprintIn applies the In predicate on the "request_fingerprint" field. +func RequestFingerprintIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldRequestFingerprint, vs...)) +} + +// RequestFingerprintNotIn applies the NotIn predicate on the "request_fingerprint" field. +func RequestFingerprintNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldRequestFingerprint, vs...)) +} + +// RequestFingerprintGT applies the GT predicate on the "request_fingerprint" field. +func RequestFingerprintGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldRequestFingerprint, v)) +} + +// RequestFingerprintGTE applies the GTE predicate on the "request_fingerprint" field. +func RequestFingerprintGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldRequestFingerprint, v)) +} + +// RequestFingerprintLT applies the LT predicate on the "request_fingerprint" field. +func RequestFingerprintLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldRequestFingerprint, v)) +} + +// RequestFingerprintLTE applies the LTE predicate on the "request_fingerprint" field. +func RequestFingerprintLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldRequestFingerprint, v)) +} + +// RequestFingerprintContains applies the Contains predicate on the "request_fingerprint" field. +func RequestFingerprintContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldRequestFingerprint, v)) +} + +// RequestFingerprintHasPrefix applies the HasPrefix predicate on the "request_fingerprint" field. 
+func RequestFingerprintHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldRequestFingerprint, v)) +} + +// RequestFingerprintHasSuffix applies the HasSuffix predicate on the "request_fingerprint" field. +func RequestFingerprintHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldRequestFingerprint, v)) +} + +// RequestFingerprintEqualFold applies the EqualFold predicate on the "request_fingerprint" field. +func RequestFingerprintEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldRequestFingerprint, v)) +} + +// RequestFingerprintContainsFold applies the ContainsFold predicate on the "request_fingerprint" field. +func RequestFingerprintContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldRequestFingerprint, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldStatus, vs...)) +} + +// StatusGT applies the GT predicate on the "status" field. +func StatusGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldStatus, v)) +} + +// StatusGTE applies the GTE predicate on the "status" field. 
+func StatusGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldStatus, v)) +} + +// StatusLT applies the LT predicate on the "status" field. +func StatusLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldStatus, v)) +} + +// StatusLTE applies the LTE predicate on the "status" field. +func StatusLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldStatus, v)) +} + +// StatusContains applies the Contains predicate on the "status" field. +func StatusContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldStatus, v)) +} + +// StatusHasPrefix applies the HasPrefix predicate on the "status" field. +func StatusHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldStatus, v)) +} + +// StatusHasSuffix applies the HasSuffix predicate on the "status" field. +func StatusHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldStatus, v)) +} + +// StatusEqualFold applies the EqualFold predicate on the "status" field. +func StatusEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldStatus, v)) +} + +// StatusContainsFold applies the ContainsFold predicate on the "status" field. +func StatusContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldStatus, v)) +} + +// ResponseStatusEQ applies the EQ predicate on the "response_status" field. +func ResponseStatusEQ(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseStatus, v)) +} + +// ResponseStatusNEQ applies the NEQ predicate on the "response_status" field. 
+func ResponseStatusNEQ(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldResponseStatus, v)) +} + +// ResponseStatusIn applies the In predicate on the "response_status" field. +func ResponseStatusIn(vs ...int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldResponseStatus, vs...)) +} + +// ResponseStatusNotIn applies the NotIn predicate on the "response_status" field. +func ResponseStatusNotIn(vs ...int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldResponseStatus, vs...)) +} + +// ResponseStatusGT applies the GT predicate on the "response_status" field. +func ResponseStatusGT(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldResponseStatus, v)) +} + +// ResponseStatusGTE applies the GTE predicate on the "response_status" field. +func ResponseStatusGTE(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldResponseStatus, v)) +} + +// ResponseStatusLT applies the LT predicate on the "response_status" field. +func ResponseStatusLT(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldResponseStatus, v)) +} + +// ResponseStatusLTE applies the LTE predicate on the "response_status" field. +func ResponseStatusLTE(v int) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldResponseStatus, v)) +} + +// ResponseStatusIsNil applies the IsNil predicate on the "response_status" field. +func ResponseStatusIsNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIsNull(FieldResponseStatus)) +} + +// ResponseStatusNotNil applies the NotNil predicate on the "response_status" field. 
+func ResponseStatusNotNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotNull(FieldResponseStatus)) +} + +// ResponseBodyEQ applies the EQ predicate on the "response_body" field. +func ResponseBodyEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldResponseBody, v)) +} + +// ResponseBodyNEQ applies the NEQ predicate on the "response_body" field. +func ResponseBodyNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldResponseBody, v)) +} + +// ResponseBodyIn applies the In predicate on the "response_body" field. +func ResponseBodyIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldResponseBody, vs...)) +} + +// ResponseBodyNotIn applies the NotIn predicate on the "response_body" field. +func ResponseBodyNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldResponseBody, vs...)) +} + +// ResponseBodyGT applies the GT predicate on the "response_body" field. +func ResponseBodyGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldResponseBody, v)) +} + +// ResponseBodyGTE applies the GTE predicate on the "response_body" field. +func ResponseBodyGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldResponseBody, v)) +} + +// ResponseBodyLT applies the LT predicate on the "response_body" field. +func ResponseBodyLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldResponseBody, v)) +} + +// ResponseBodyLTE applies the LTE predicate on the "response_body" field. +func ResponseBodyLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldResponseBody, v)) +} + +// ResponseBodyContains applies the Contains predicate on the "response_body" field. 
+func ResponseBodyContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldResponseBody, v)) +} + +// ResponseBodyHasPrefix applies the HasPrefix predicate on the "response_body" field. +func ResponseBodyHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldResponseBody, v)) +} + +// ResponseBodyHasSuffix applies the HasSuffix predicate on the "response_body" field. +func ResponseBodyHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldResponseBody, v)) +} + +// ResponseBodyIsNil applies the IsNil predicate on the "response_body" field. +func ResponseBodyIsNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIsNull(FieldResponseBody)) +} + +// ResponseBodyNotNil applies the NotNil predicate on the "response_body" field. +func ResponseBodyNotNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotNull(FieldResponseBody)) +} + +// ResponseBodyEqualFold applies the EqualFold predicate on the "response_body" field. +func ResponseBodyEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldResponseBody, v)) +} + +// ResponseBodyContainsFold applies the ContainsFold predicate on the "response_body" field. +func ResponseBodyContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldResponseBody, v)) +} + +// ErrorReasonEQ applies the EQ predicate on the "error_reason" field. +func ErrorReasonEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldErrorReason, v)) +} + +// ErrorReasonNEQ applies the NEQ predicate on the "error_reason" field. 
+func ErrorReasonNEQ(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldErrorReason, v)) +} + +// ErrorReasonIn applies the In predicate on the "error_reason" field. +func ErrorReasonIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldErrorReason, vs...)) +} + +// ErrorReasonNotIn applies the NotIn predicate on the "error_reason" field. +func ErrorReasonNotIn(vs ...string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldErrorReason, vs...)) +} + +// ErrorReasonGT applies the GT predicate on the "error_reason" field. +func ErrorReasonGT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldErrorReason, v)) +} + +// ErrorReasonGTE applies the GTE predicate on the "error_reason" field. +func ErrorReasonGTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldErrorReason, v)) +} + +// ErrorReasonLT applies the LT predicate on the "error_reason" field. +func ErrorReasonLT(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldErrorReason, v)) +} + +// ErrorReasonLTE applies the LTE predicate on the "error_reason" field. +func ErrorReasonLTE(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldErrorReason, v)) +} + +// ErrorReasonContains applies the Contains predicate on the "error_reason" field. +func ErrorReasonContains(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContains(FieldErrorReason, v)) +} + +// ErrorReasonHasPrefix applies the HasPrefix predicate on the "error_reason" field. +func ErrorReasonHasPrefix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasPrefix(FieldErrorReason, v)) +} + +// ErrorReasonHasSuffix applies the HasSuffix predicate on the "error_reason" field. 
+func ErrorReasonHasSuffix(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldHasSuffix(FieldErrorReason, v)) +} + +// ErrorReasonIsNil applies the IsNil predicate on the "error_reason" field. +func ErrorReasonIsNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIsNull(FieldErrorReason)) +} + +// ErrorReasonNotNil applies the NotNil predicate on the "error_reason" field. +func ErrorReasonNotNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotNull(FieldErrorReason)) +} + +// ErrorReasonEqualFold applies the EqualFold predicate on the "error_reason" field. +func ErrorReasonEqualFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEqualFold(FieldErrorReason, v)) +} + +// ErrorReasonContainsFold applies the ContainsFold predicate on the "error_reason" field. +func ErrorReasonContainsFold(v string) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldContainsFold(FieldErrorReason, v)) +} + +// LockedUntilEQ applies the EQ predicate on the "locked_until" field. +func LockedUntilEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldLockedUntil, v)) +} + +// LockedUntilNEQ applies the NEQ predicate on the "locked_until" field. +func LockedUntilNEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldLockedUntil, v)) +} + +// LockedUntilIn applies the In predicate on the "locked_until" field. +func LockedUntilIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldLockedUntil, vs...)) +} + +// LockedUntilNotIn applies the NotIn predicate on the "locked_until" field. 
+func LockedUntilNotIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldLockedUntil, vs...)) +} + +// LockedUntilGT applies the GT predicate on the "locked_until" field. +func LockedUntilGT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldLockedUntil, v)) +} + +// LockedUntilGTE applies the GTE predicate on the "locked_until" field. +func LockedUntilGTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldLockedUntil, v)) +} + +// LockedUntilLT applies the LT predicate on the "locked_until" field. +func LockedUntilLT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldLockedUntil, v)) +} + +// LockedUntilLTE applies the LTE predicate on the "locked_until" field. +func LockedUntilLTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldLockedUntil, v)) +} + +// LockedUntilIsNil applies the IsNil predicate on the "locked_until" field. +func LockedUntilIsNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIsNull(FieldLockedUntil)) +} + +// LockedUntilNotNil applies the NotNil predicate on the "locked_until" field. +func LockedUntilNotNil() predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotNull(FieldLockedUntil)) +} + +// ExpiresAtEQ applies the EQ predicate on the "expires_at" field. +func ExpiresAtEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldEQ(FieldExpiresAt, v)) +} + +// ExpiresAtNEQ applies the NEQ predicate on the "expires_at" field. +func ExpiresAtNEQ(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNEQ(FieldExpiresAt, v)) +} + +// ExpiresAtIn applies the In predicate on the "expires_at" field. 
+func ExpiresAtIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtNotIn applies the NotIn predicate on the "expires_at" field. +func ExpiresAtNotIn(vs ...time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldNotIn(FieldExpiresAt, vs...)) +} + +// ExpiresAtGT applies the GT predicate on the "expires_at" field. +func ExpiresAtGT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGT(FieldExpiresAt, v)) +} + +// ExpiresAtGTE applies the GTE predicate on the "expires_at" field. +func ExpiresAtGTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldGTE(FieldExpiresAt, v)) +} + +// ExpiresAtLT applies the LT predicate on the "expires_at" field. +func ExpiresAtLT(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLT(FieldExpiresAt, v)) +} + +// ExpiresAtLTE applies the LTE predicate on the "expires_at" field. +func ExpiresAtLTE(v time.Time) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.FieldLTE(FieldExpiresAt, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.IdempotencyRecord) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.IdempotencyRecord) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.IdempotencyRecord) predicate.IdempotencyRecord { + return predicate.IdempotencyRecord(sql.NotPredicates(p)) +} diff --git a/backend/ent/idempotencyrecord_create.go b/backend/ent/idempotencyrecord_create.go new file mode 100644 index 000000000..bf4deaf20 --- /dev/null +++ b/backend/ent/idempotencyrecord_create.go @@ -0,0 +1,1132 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" +) + +// IdempotencyRecordCreate is the builder for creating a IdempotencyRecord entity. +type IdempotencyRecordCreate struct { + config + mutation *IdempotencyRecordMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetCreatedAt sets the "created_at" field. +func (_c *IdempotencyRecordCreate) SetCreatedAt(v time.Time) *IdempotencyRecordCreate { + _c.mutation.SetCreatedAt(v) + return _c +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableCreatedAt(v *time.Time) *IdempotencyRecordCreate { + if v != nil { + _c.SetCreatedAt(*v) + } + return _c +} + +// SetUpdatedAt sets the "updated_at" field. +func (_c *IdempotencyRecordCreate) SetUpdatedAt(v time.Time) *IdempotencyRecordCreate { + _c.mutation.SetUpdatedAt(v) + return _c +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableUpdatedAt(v *time.Time) *IdempotencyRecordCreate { + if v != nil { + _c.SetUpdatedAt(*v) + } + return _c +} + +// SetScope sets the "scope" field. +func (_c *IdempotencyRecordCreate) SetScope(v string) *IdempotencyRecordCreate { + _c.mutation.SetScope(v) + return _c +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. 
+func (_c *IdempotencyRecordCreate) SetIdempotencyKeyHash(v string) *IdempotencyRecordCreate { + _c.mutation.SetIdempotencyKeyHash(v) + return _c +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (_c *IdempotencyRecordCreate) SetRequestFingerprint(v string) *IdempotencyRecordCreate { + _c.mutation.SetRequestFingerprint(v) + return _c +} + +// SetStatus sets the "status" field. +func (_c *IdempotencyRecordCreate) SetStatus(v string) *IdempotencyRecordCreate { + _c.mutation.SetStatus(v) + return _c +} + +// SetResponseStatus sets the "response_status" field. +func (_c *IdempotencyRecordCreate) SetResponseStatus(v int) *IdempotencyRecordCreate { + _c.mutation.SetResponseStatus(v) + return _c +} + +// SetNillableResponseStatus sets the "response_status" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableResponseStatus(v *int) *IdempotencyRecordCreate { + if v != nil { + _c.SetResponseStatus(*v) + } + return _c +} + +// SetResponseBody sets the "response_body" field. +func (_c *IdempotencyRecordCreate) SetResponseBody(v string) *IdempotencyRecordCreate { + _c.mutation.SetResponseBody(v) + return _c +} + +// SetNillableResponseBody sets the "response_body" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableResponseBody(v *string) *IdempotencyRecordCreate { + if v != nil { + _c.SetResponseBody(*v) + } + return _c +} + +// SetErrorReason sets the "error_reason" field. +func (_c *IdempotencyRecordCreate) SetErrorReason(v string) *IdempotencyRecordCreate { + _c.mutation.SetErrorReason(v) + return _c +} + +// SetNillableErrorReason sets the "error_reason" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableErrorReason(v *string) *IdempotencyRecordCreate { + if v != nil { + _c.SetErrorReason(*v) + } + return _c +} + +// SetLockedUntil sets the "locked_until" field. 
+func (_c *IdempotencyRecordCreate) SetLockedUntil(v time.Time) *IdempotencyRecordCreate { + _c.mutation.SetLockedUntil(v) + return _c +} + +// SetNillableLockedUntil sets the "locked_until" field if the given value is not nil. +func (_c *IdempotencyRecordCreate) SetNillableLockedUntil(v *time.Time) *IdempotencyRecordCreate { + if v != nil { + _c.SetLockedUntil(*v) + } + return _c +} + +// SetExpiresAt sets the "expires_at" field. +func (_c *IdempotencyRecordCreate) SetExpiresAt(v time.Time) *IdempotencyRecordCreate { + _c.mutation.SetExpiresAt(v) + return _c +} + +// Mutation returns the IdempotencyRecordMutation object of the builder. +func (_c *IdempotencyRecordCreate) Mutation() *IdempotencyRecordMutation { + return _c.mutation +} + +// Save creates the IdempotencyRecord in the database. +func (_c *IdempotencyRecordCreate) Save(ctx context.Context) (*IdempotencyRecord, error) { + _c.defaults() + return withHooks(ctx, _c.sqlSave, _c.mutation, _c.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (_c *IdempotencyRecordCreate) SaveX(ctx context.Context) *IdempotencyRecord { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *IdempotencyRecordCreate) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *IdempotencyRecordCreate) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_c *IdempotencyRecordCreate) defaults() { + if _, ok := _c.mutation.CreatedAt(); !ok { + v := idempotencyrecord.DefaultCreatedAt() + _c.mutation.SetCreatedAt(v) + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + v := idempotencyrecord.DefaultUpdatedAt() + _c.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (_c *IdempotencyRecordCreate) check() error { + if _, ok := _c.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "IdempotencyRecord.created_at"`)} + } + if _, ok := _c.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "IdempotencyRecord.updated_at"`)} + } + if _, ok := _c.mutation.Scope(); !ok { + return &ValidationError{Name: "scope", err: errors.New(`ent: missing required field "IdempotencyRecord.scope"`)} + } + if v, ok := _c.mutation.Scope(); ok { + if err := idempotencyrecord.ScopeValidator(v); err != nil { + return &ValidationError{Name: "scope", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.scope": %w`, err)} + } + } + if _, ok := _c.mutation.IdempotencyKeyHash(); !ok { + return &ValidationError{Name: "idempotency_key_hash", err: errors.New(`ent: missing required field "IdempotencyRecord.idempotency_key_hash"`)} + } + if v, ok := _c.mutation.IdempotencyKeyHash(); ok { + if err := idempotencyrecord.IdempotencyKeyHashValidator(v); err != nil { + return &ValidationError{Name: "idempotency_key_hash", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.idempotency_key_hash": %w`, err)} + } + } + if _, ok := _c.mutation.RequestFingerprint(); !ok { + return &ValidationError{Name: "request_fingerprint", err: errors.New(`ent: missing required field "IdempotencyRecord.request_fingerprint"`)} + } + if v, ok := _c.mutation.RequestFingerprint(); ok { + if err := idempotencyrecord.RequestFingerprintValidator(v); err != nil { + return &ValidationError{Name: "request_fingerprint", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.request_fingerprint": %w`, err)} + } + } + if _, ok := _c.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "IdempotencyRecord.status"`)} + } + if v, ok := _c.mutation.Status(); ok { + if 
err := idempotencyrecord.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.status": %w`, err)} + } + } + if v, ok := _c.mutation.ErrorReason(); ok { + if err := idempotencyrecord.ErrorReasonValidator(v); err != nil { + return &ValidationError{Name: "error_reason", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.error_reason": %w`, err)} + } + } + if _, ok := _c.mutation.ExpiresAt(); !ok { + return &ValidationError{Name: "expires_at", err: errors.New(`ent: missing required field "IdempotencyRecord.expires_at"`)} + } + return nil +} + +func (_c *IdempotencyRecordCreate) sqlSave(ctx context.Context) (*IdempotencyRecord, error) { + if err := _c.check(); err != nil { + return nil, err + } + _node, _spec := _c.createSpec() + if err := sqlgraph.CreateNode(ctx, _c.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int64(id) + _c.mutation.id = &_node.ID + _c.mutation.done = true + return _node, nil +} + +func (_c *IdempotencyRecordCreate) createSpec() (*IdempotencyRecord, *sqlgraph.CreateSpec) { + var ( + _node = &IdempotencyRecord{config: _c.config} + _spec = sqlgraph.NewCreateSpec(idempotencyrecord.Table, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64)) + ) + _spec.OnConflict = _c.conflict + if value, ok := _c.mutation.CreatedAt(); ok { + _spec.SetField(idempotencyrecord.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := _c.mutation.UpdatedAt(); ok { + _spec.SetField(idempotencyrecord.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := _c.mutation.Scope(); ok { + _spec.SetField(idempotencyrecord.FieldScope, field.TypeString, value) + _node.Scope = value + } + if value, ok := _c.mutation.IdempotencyKeyHash(); ok { + 
_spec.SetField(idempotencyrecord.FieldIdempotencyKeyHash, field.TypeString, value) + _node.IdempotencyKeyHash = value + } + if value, ok := _c.mutation.RequestFingerprint(); ok { + _spec.SetField(idempotencyrecord.FieldRequestFingerprint, field.TypeString, value) + _node.RequestFingerprint = value + } + if value, ok := _c.mutation.Status(); ok { + _spec.SetField(idempotencyrecord.FieldStatus, field.TypeString, value) + _node.Status = value + } + if value, ok := _c.mutation.ResponseStatus(); ok { + _spec.SetField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value) + _node.ResponseStatus = &value + } + if value, ok := _c.mutation.ResponseBody(); ok { + _spec.SetField(idempotencyrecord.FieldResponseBody, field.TypeString, value) + _node.ResponseBody = &value + } + if value, ok := _c.mutation.ErrorReason(); ok { + _spec.SetField(idempotencyrecord.FieldErrorReason, field.TypeString, value) + _node.ErrorReason = &value + } + if value, ok := _c.mutation.LockedUntil(); ok { + _spec.SetField(idempotencyrecord.FieldLockedUntil, field.TypeTime, value) + _node.LockedUntil = &value + } + if value, ok := _c.mutation.ExpiresAt(); ok { + _spec.SetField(idempotencyrecord.FieldExpiresAt, field.TypeTime, value) + _node.ExpiresAt = value + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.IdempotencyRecord.Create(). +// SetCreatedAt(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.IdempotencyRecordUpsert) { +// SetCreatedAt(v+v). +// }). 
+// Exec(ctx) +func (_c *IdempotencyRecordCreate) OnConflict(opts ...sql.ConflictOption) *IdempotencyRecordUpsertOne { + _c.conflict = opts + return &IdempotencyRecordUpsertOne{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *IdempotencyRecordCreate) OnConflictColumns(columns ...string) *IdempotencyRecordUpsertOne { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &IdempotencyRecordUpsertOne{ + create: _c, + } +} + +type ( + // IdempotencyRecordUpsertOne is the builder for "upsert"-ing + // one IdempotencyRecord node. + IdempotencyRecordUpsertOne struct { + create *IdempotencyRecordCreate + } + + // IdempotencyRecordUpsert is the "OnConflict" setter. + IdempotencyRecordUpsert struct { + *sql.UpdateSet + } +) + +// SetUpdatedAt sets the "updated_at" field. +func (u *IdempotencyRecordUpsert) SetUpdatedAt(v time.Time) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateUpdatedAt() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldUpdatedAt) + return u +} + +// SetScope sets the "scope" field. +func (u *IdempotencyRecordUpsert) SetScope(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldScope, v) + return u +} + +// UpdateScope sets the "scope" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateScope() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldScope) + return u +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. 
+func (u *IdempotencyRecordUpsert) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldIdempotencyKeyHash, v) + return u +} + +// UpdateIdempotencyKeyHash sets the "idempotency_key_hash" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateIdempotencyKeyHash() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldIdempotencyKeyHash) + return u +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (u *IdempotencyRecordUpsert) SetRequestFingerprint(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldRequestFingerprint, v) + return u +} + +// UpdateRequestFingerprint sets the "request_fingerprint" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateRequestFingerprint() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldRequestFingerprint) + return u +} + +// SetStatus sets the "status" field. +func (u *IdempotencyRecordUpsert) SetStatus(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateStatus() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldStatus) + return u +} + +// SetResponseStatus sets the "response_status" field. +func (u *IdempotencyRecordUpsert) SetResponseStatus(v int) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldResponseStatus, v) + return u +} + +// UpdateResponseStatus sets the "response_status" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateResponseStatus() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldResponseStatus) + return u +} + +// AddResponseStatus adds v to the "response_status" field. 
+func (u *IdempotencyRecordUpsert) AddResponseStatus(v int) *IdempotencyRecordUpsert { + u.Add(idempotencyrecord.FieldResponseStatus, v) + return u +} + +// ClearResponseStatus clears the value of the "response_status" field. +func (u *IdempotencyRecordUpsert) ClearResponseStatus() *IdempotencyRecordUpsert { + u.SetNull(idempotencyrecord.FieldResponseStatus) + return u +} + +// SetResponseBody sets the "response_body" field. +func (u *IdempotencyRecordUpsert) SetResponseBody(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldResponseBody, v) + return u +} + +// UpdateResponseBody sets the "response_body" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateResponseBody() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldResponseBody) + return u +} + +// ClearResponseBody clears the value of the "response_body" field. +func (u *IdempotencyRecordUpsert) ClearResponseBody() *IdempotencyRecordUpsert { + u.SetNull(idempotencyrecord.FieldResponseBody) + return u +} + +// SetErrorReason sets the "error_reason" field. +func (u *IdempotencyRecordUpsert) SetErrorReason(v string) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldErrorReason, v) + return u +} + +// UpdateErrorReason sets the "error_reason" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateErrorReason() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldErrorReason) + return u +} + +// ClearErrorReason clears the value of the "error_reason" field. +func (u *IdempotencyRecordUpsert) ClearErrorReason() *IdempotencyRecordUpsert { + u.SetNull(idempotencyrecord.FieldErrorReason) + return u +} + +// SetLockedUntil sets the "locked_until" field. +func (u *IdempotencyRecordUpsert) SetLockedUntil(v time.Time) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldLockedUntil, v) + return u +} + +// UpdateLockedUntil sets the "locked_until" field to the value that was provided on create. 
+func (u *IdempotencyRecordUpsert) UpdateLockedUntil() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldLockedUntil) + return u +} + +// ClearLockedUntil clears the value of the "locked_until" field. +func (u *IdempotencyRecordUpsert) ClearLockedUntil() *IdempotencyRecordUpsert { + u.SetNull(idempotencyrecord.FieldLockedUntil) + return u +} + +// SetExpiresAt sets the "expires_at" field. +func (u *IdempotencyRecordUpsert) SetExpiresAt(v time.Time) *IdempotencyRecordUpsert { + u.Set(idempotencyrecord.FieldExpiresAt, v) + return u +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsert) UpdateExpiresAt() *IdempotencyRecordUpsert { + u.SetExcluded(idempotencyrecord.FieldExpiresAt) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *IdempotencyRecordUpsertOne) UpdateNewValues() *IdempotencyRecordUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(idempotencyrecord.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *IdempotencyRecordUpsertOne) Ignore() *IdempotencyRecordUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. 
+func (u *IdempotencyRecordUpsertOne) DoNothing() *IdempotencyRecordUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the IdempotencyRecordCreate.OnConflict +// documentation for more info. +func (u *IdempotencyRecordUpsertOne) Update(set func(*IdempotencyRecordUpsert)) *IdempotencyRecordUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&IdempotencyRecordUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *IdempotencyRecordUpsertOne) SetUpdatedAt(v time.Time) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateUpdatedAt() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetScope sets the "scope" field. +func (u *IdempotencyRecordUpsertOne) SetScope(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetScope(v) + }) +} + +// UpdateScope sets the "scope" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateScope() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateScope() + }) +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. +func (u *IdempotencyRecordUpsertOne) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetIdempotencyKeyHash(v) + }) +} + +// UpdateIdempotencyKeyHash sets the "idempotency_key_hash" field to the value that was provided on create. 
+func (u *IdempotencyRecordUpsertOne) UpdateIdempotencyKeyHash() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateIdempotencyKeyHash() + }) +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (u *IdempotencyRecordUpsertOne) SetRequestFingerprint(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetRequestFingerprint(v) + }) +} + +// UpdateRequestFingerprint sets the "request_fingerprint" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateRequestFingerprint() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateRequestFingerprint() + }) +} + +// SetStatus sets the "status" field. +func (u *IdempotencyRecordUpsertOne) SetStatus(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateStatus() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateStatus() + }) +} + +// SetResponseStatus sets the "response_status" field. +func (u *IdempotencyRecordUpsertOne) SetResponseStatus(v int) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetResponseStatus(v) + }) +} + +// AddResponseStatus adds v to the "response_status" field. +func (u *IdempotencyRecordUpsertOne) AddResponseStatus(v int) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.AddResponseStatus(v) + }) +} + +// UpdateResponseStatus sets the "response_status" field to the value that was provided on create. 
+func (u *IdempotencyRecordUpsertOne) UpdateResponseStatus() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateResponseStatus() + }) +} + +// ClearResponseStatus clears the value of the "response_status" field. +func (u *IdempotencyRecordUpsertOne) ClearResponseStatus() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearResponseStatus() + }) +} + +// SetResponseBody sets the "response_body" field. +func (u *IdempotencyRecordUpsertOne) SetResponseBody(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetResponseBody(v) + }) +} + +// UpdateResponseBody sets the "response_body" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateResponseBody() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateResponseBody() + }) +} + +// ClearResponseBody clears the value of the "response_body" field. +func (u *IdempotencyRecordUpsertOne) ClearResponseBody() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearResponseBody() + }) +} + +// SetErrorReason sets the "error_reason" field. +func (u *IdempotencyRecordUpsertOne) SetErrorReason(v string) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetErrorReason(v) + }) +} + +// UpdateErrorReason sets the "error_reason" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateErrorReason() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateErrorReason() + }) +} + +// ClearErrorReason clears the value of the "error_reason" field. +func (u *IdempotencyRecordUpsertOne) ClearErrorReason() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearErrorReason() + }) +} + +// SetLockedUntil sets the "locked_until" field. 
+func (u *IdempotencyRecordUpsertOne) SetLockedUntil(v time.Time) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetLockedUntil(v) + }) +} + +// UpdateLockedUntil sets the "locked_until" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateLockedUntil() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateLockedUntil() + }) +} + +// ClearLockedUntil clears the value of the "locked_until" field. +func (u *IdempotencyRecordUpsertOne) ClearLockedUntil() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearLockedUntil() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *IdempotencyRecordUpsertOne) SetExpiresAt(v time.Time) *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertOne) UpdateExpiresAt() *IdempotencyRecordUpsertOne { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateExpiresAt() + }) +} + +// Exec executes the query. +func (u *IdempotencyRecordUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for IdempotencyRecordCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *IdempotencyRecordUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *IdempotencyRecordUpsertOne) ID(ctx context.Context) (id int64, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. 
+func (u *IdempotencyRecordUpsertOne) IDX(ctx context.Context) int64 { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// IdempotencyRecordCreateBulk is the builder for creating many IdempotencyRecord entities in bulk. +type IdempotencyRecordCreateBulk struct { + config + err error + builders []*IdempotencyRecordCreate + conflict []sql.ConflictOption +} + +// Save creates the IdempotencyRecord entities in the database. +func (_c *IdempotencyRecordCreateBulk) Save(ctx context.Context) ([]*IdempotencyRecord, error) { + if _c.err != nil { + return nil, _c.err + } + specs := make([]*sqlgraph.CreateSpec, len(_c.builders)) + nodes := make([]*IdempotencyRecord, len(_c.builders)) + mutators := make([]Mutator, len(_c.builders)) + for i := range _c.builders { + func(i int, root context.Context) { + builder := _c.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*IdempotencyRecordMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, _c.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = _c.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, _c.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int64(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, _c.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (_c *IdempotencyRecordCreateBulk) SaveX(ctx context.Context) []*IdempotencyRecord { + v, err := _c.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (_c *IdempotencyRecordCreateBulk) Exec(ctx context.Context) error { + _, err := _c.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_c *IdempotencyRecordCreateBulk) ExecX(ctx context.Context) { + if err := _c.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.IdempotencyRecord.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.IdempotencyRecordUpsert) { +// SetCreatedAt(v+v). +// }). +// Exec(ctx) +func (_c *IdempotencyRecordCreateBulk) OnConflict(opts ...sql.ConflictOption) *IdempotencyRecordUpsertBulk { + _c.conflict = opts + return &IdempotencyRecordUpsertBulk{ + create: _c, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. 
Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (_c *IdempotencyRecordCreateBulk) OnConflictColumns(columns ...string) *IdempotencyRecordUpsertBulk { + _c.conflict = append(_c.conflict, sql.ConflictColumns(columns...)) + return &IdempotencyRecordUpsertBulk{ + create: _c, + } +} + +// IdempotencyRecordUpsertBulk is the builder for "upsert"-ing +// a bulk of IdempotencyRecord nodes. +type IdempotencyRecordUpsertBulk struct { + create *IdempotencyRecordCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *IdempotencyRecordUpsertBulk) UpdateNewValues() *IdempotencyRecordUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(idempotencyrecord.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.IdempotencyRecord.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *IdempotencyRecordUpsertBulk) Ignore() *IdempotencyRecordUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *IdempotencyRecordUpsertBulk) DoNothing() *IdempotencyRecordUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. 
See the IdempotencyRecordCreateBulk.OnConflict +// documentation for more info. +func (u *IdempotencyRecordUpsertBulk) Update(set func(*IdempotencyRecordUpsert)) *IdempotencyRecordUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&IdempotencyRecordUpsert{UpdateSet: update}) + })) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *IdempotencyRecordUpsertBulk) SetUpdatedAt(v time.Time) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateUpdatedAt() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetScope sets the "scope" field. +func (u *IdempotencyRecordUpsertBulk) SetScope(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetScope(v) + }) +} + +// UpdateScope sets the "scope" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateScope() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateScope() + }) +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. +func (u *IdempotencyRecordUpsertBulk) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetIdempotencyKeyHash(v) + }) +} + +// UpdateIdempotencyKeyHash sets the "idempotency_key_hash" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateIdempotencyKeyHash() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateIdempotencyKeyHash() + }) +} + +// SetRequestFingerprint sets the "request_fingerprint" field. 
+func (u *IdempotencyRecordUpsertBulk) SetRequestFingerprint(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetRequestFingerprint(v) + }) +} + +// UpdateRequestFingerprint sets the "request_fingerprint" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateRequestFingerprint() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateRequestFingerprint() + }) +} + +// SetStatus sets the "status" field. +func (u *IdempotencyRecordUpsertBulk) SetStatus(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateStatus() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateStatus() + }) +} + +// SetResponseStatus sets the "response_status" field. +func (u *IdempotencyRecordUpsertBulk) SetResponseStatus(v int) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetResponseStatus(v) + }) +} + +// AddResponseStatus adds v to the "response_status" field. +func (u *IdempotencyRecordUpsertBulk) AddResponseStatus(v int) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.AddResponseStatus(v) + }) +} + +// UpdateResponseStatus sets the "response_status" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateResponseStatus() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateResponseStatus() + }) +} + +// ClearResponseStatus clears the value of the "response_status" field. 
+func (u *IdempotencyRecordUpsertBulk) ClearResponseStatus() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearResponseStatus() + }) +} + +// SetResponseBody sets the "response_body" field. +func (u *IdempotencyRecordUpsertBulk) SetResponseBody(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetResponseBody(v) + }) +} + +// UpdateResponseBody sets the "response_body" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateResponseBody() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateResponseBody() + }) +} + +// ClearResponseBody clears the value of the "response_body" field. +func (u *IdempotencyRecordUpsertBulk) ClearResponseBody() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearResponseBody() + }) +} + +// SetErrorReason sets the "error_reason" field. +func (u *IdempotencyRecordUpsertBulk) SetErrorReason(v string) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetErrorReason(v) + }) +} + +// UpdateErrorReason sets the "error_reason" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateErrorReason() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateErrorReason() + }) +} + +// ClearErrorReason clears the value of the "error_reason" field. +func (u *IdempotencyRecordUpsertBulk) ClearErrorReason() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearErrorReason() + }) +} + +// SetLockedUntil sets the "locked_until" field. 
+func (u *IdempotencyRecordUpsertBulk) SetLockedUntil(v time.Time) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetLockedUntil(v) + }) +} + +// UpdateLockedUntil sets the "locked_until" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateLockedUntil() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateLockedUntil() + }) +} + +// ClearLockedUntil clears the value of the "locked_until" field. +func (u *IdempotencyRecordUpsertBulk) ClearLockedUntil() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.ClearLockedUntil() + }) +} + +// SetExpiresAt sets the "expires_at" field. +func (u *IdempotencyRecordUpsertBulk) SetExpiresAt(v time.Time) *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.SetExpiresAt(v) + }) +} + +// UpdateExpiresAt sets the "expires_at" field to the value that was provided on create. +func (u *IdempotencyRecordUpsertBulk) UpdateExpiresAt() *IdempotencyRecordUpsertBulk { + return u.Update(func(s *IdempotencyRecordUpsert) { + s.UpdateExpiresAt() + }) +} + +// Exec executes the query. +func (u *IdempotencyRecordUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the IdempotencyRecordCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for IdempotencyRecordCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *IdempotencyRecordUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/idempotencyrecord_delete.go b/backend/ent/idempotencyrecord_delete.go new file mode 100644 index 000000000..f5c875591 --- /dev/null +++ b/backend/ent/idempotencyrecord_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// IdempotencyRecordDelete is the builder for deleting a IdempotencyRecord entity. +type IdempotencyRecordDelete struct { + config + hooks []Hook + mutation *IdempotencyRecordMutation +} + +// Where appends a list predicates to the IdempotencyRecordDelete builder. +func (_d *IdempotencyRecordDelete) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordDelete { + _d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (_d *IdempotencyRecordDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, _d.sqlExec, _d.mutation, _d.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_d *IdempotencyRecordDelete) ExecX(ctx context.Context) int { + n, err := _d.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (_d *IdempotencyRecordDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(idempotencyrecord.Table, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64)) + if ps := _d.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, _d.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + _d.mutation.done = true + return affected, err +} + +// IdempotencyRecordDeleteOne is the builder for deleting a single IdempotencyRecord entity. +type IdempotencyRecordDeleteOne struct { + _d *IdempotencyRecordDelete +} + +// Where appends a list predicates to the IdempotencyRecordDelete builder. +func (_d *IdempotencyRecordDeleteOne) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordDeleteOne { + _d._d.mutation.Where(ps...) + return _d +} + +// Exec executes the deletion query. +func (_d *IdempotencyRecordDeleteOne) Exec(ctx context.Context) error { + n, err := _d._d.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{idempotencyrecord.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (_d *IdempotencyRecordDeleteOne) ExecX(ctx context.Context) { + if err := _d.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/backend/ent/idempotencyrecord_query.go b/backend/ent/idempotencyrecord_query.go new file mode 100644 index 000000000..fbba4dfa8 --- /dev/null +++ b/backend/ent/idempotencyrecord_query.go @@ -0,0 +1,564 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// IdempotencyRecordQuery is the builder for querying IdempotencyRecord entities. +type IdempotencyRecordQuery struct { + config + ctx *QueryContext + order []idempotencyrecord.OrderOption + inters []Interceptor + predicates []predicate.IdempotencyRecord + modifiers []func(*sql.Selector) + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the IdempotencyRecordQuery builder. +func (_q *IdempotencyRecordQuery) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordQuery { + _q.predicates = append(_q.predicates, ps...) + return _q +} + +// Limit the number of records to be returned by this query. +func (_q *IdempotencyRecordQuery) Limit(limit int) *IdempotencyRecordQuery { + _q.ctx.Limit = &limit + return _q +} + +// Offset to start from. +func (_q *IdempotencyRecordQuery) Offset(offset int) *IdempotencyRecordQuery { + _q.ctx.Offset = &offset + return _q +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (_q *IdempotencyRecordQuery) Unique(unique bool) *IdempotencyRecordQuery { + _q.ctx.Unique = &unique + return _q +} + +// Order specifies how the records should be ordered. +func (_q *IdempotencyRecordQuery) Order(o ...idempotencyrecord.OrderOption) *IdempotencyRecordQuery { + _q.order = append(_q.order, o...) + return _q +} + +// First returns the first IdempotencyRecord entity from the query. +// Returns a *NotFoundError when no IdempotencyRecord was found. 
+func (_q *IdempotencyRecordQuery) First(ctx context.Context) (*IdempotencyRecord, error) { + nodes, err := _q.Limit(1).All(setContextOp(ctx, _q.ctx, ent.OpQueryFirst)) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{idempotencyrecord.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) FirstX(ctx context.Context) *IdempotencyRecord { + node, err := _q.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first IdempotencyRecord ID from the query. +// Returns a *NotFoundError when no IdempotencyRecord ID was found. +func (_q *IdempotencyRecordQuery) FirstID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(1).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryFirstID)); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{idempotencyrecord.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) FirstIDX(ctx context.Context) int64 { + id, err := _q.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single IdempotencyRecord entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one IdempotencyRecord entity is found. +// Returns a *NotFoundError when no IdempotencyRecord entities are found. +func (_q *IdempotencyRecordQuery) Only(ctx context.Context) (*IdempotencyRecord, error) { + nodes, err := _q.Limit(2).All(setContextOp(ctx, _q.ctx, ent.OpQueryOnly)) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{idempotencyrecord.Label} + default: + return nil, &NotSingularError{idempotencyrecord.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. 
+func (_q *IdempotencyRecordQuery) OnlyX(ctx context.Context) *IdempotencyRecord { + node, err := _q.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only IdempotencyRecord ID in the query. +// Returns a *NotSingularError when more than one IdempotencyRecord ID is found. +// Returns a *NotFoundError when no entities are found. +func (_q *IdempotencyRecordQuery) OnlyID(ctx context.Context) (id int64, err error) { + var ids []int64 + if ids, err = _q.Limit(2).IDs(setContextOp(ctx, _q.ctx, ent.OpQueryOnlyID)); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{idempotencyrecord.Label} + default: + err = &NotSingularError{idempotencyrecord.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) OnlyIDX(ctx context.Context) int64 { + id, err := _q.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of IdempotencyRecords. +func (_q *IdempotencyRecordQuery) All(ctx context.Context) ([]*IdempotencyRecord, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryAll) + if err := _q.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*IdempotencyRecord, *IdempotencyRecordQuery]() + return withInterceptors[[]*IdempotencyRecord](ctx, _q, qr, _q.inters) +} + +// AllX is like All, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) AllX(ctx context.Context) []*IdempotencyRecord { + nodes, err := _q.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of IdempotencyRecord IDs. 
+func (_q *IdempotencyRecordQuery) IDs(ctx context.Context) (ids []int64, err error) { + if _q.ctx.Unique == nil && _q.path != nil { + _q.Unique(true) + } + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryIDs) + if err = _q.Select(idempotencyrecord.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) IDsX(ctx context.Context) []int64 { + ids, err := _q.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (_q *IdempotencyRecordQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryCount) + if err := _q.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, _q, querierCount[*IdempotencyRecordQuery](), _q.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) CountX(ctx context.Context) int { + count, err := _q.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (_q *IdempotencyRecordQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, _q.ctx, ent.OpQueryExist) + switch _, err := _q.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (_q *IdempotencyRecordQuery) ExistX(ctx context.Context) bool { + exist, err := _q.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the IdempotencyRecordQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
+func (_q *IdempotencyRecordQuery) Clone() *IdempotencyRecordQuery { + if _q == nil { + return nil + } + return &IdempotencyRecordQuery{ + config: _q.config, + ctx: _q.ctx.Clone(), + order: append([]idempotencyrecord.OrderOption{}, _q.order...), + inters: append([]Interceptor{}, _q.inters...), + predicates: append([]predicate.IdempotencyRecord{}, _q.predicates...), + // clone intermediate query. + sql: _q.sql.Clone(), + path: _q.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.IdempotencyRecord.Query(). +// GroupBy(idempotencyrecord.FieldCreatedAt). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (_q *IdempotencyRecordQuery) GroupBy(field string, fields ...string) *IdempotencyRecordGroupBy { + _q.ctx.Fields = append([]string{field}, fields...) + grbuild := &IdempotencyRecordGroupBy{build: _q} + grbuild.flds = &_q.ctx.Fields + grbuild.label = idempotencyrecord.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// CreatedAt time.Time `json:"created_at,omitempty"` +// } +// +// client.IdempotencyRecord.Query(). +// Select(idempotencyrecord.FieldCreatedAt). +// Scan(ctx, &v) +func (_q *IdempotencyRecordQuery) Select(fields ...string) *IdempotencyRecordSelect { + _q.ctx.Fields = append(_q.ctx.Fields, fields...) + sbuild := &IdempotencyRecordSelect{IdempotencyRecordQuery: _q} + sbuild.label = idempotencyrecord.Label + sbuild.flds, sbuild.scan = &_q.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a IdempotencyRecordSelect configured with the given aggregations. 
+func (_q *IdempotencyRecordQuery) Aggregate(fns ...AggregateFunc) *IdempotencyRecordSelect { + return _q.Select().Aggregate(fns...) +} + +func (_q *IdempotencyRecordQuery) prepareQuery(ctx context.Context) error { + for _, inter := range _q.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, _q); err != nil { + return err + } + } + } + for _, f := range _q.ctx.Fields { + if !idempotencyrecord.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if _q.path != nil { + prev, err := _q.path(ctx) + if err != nil { + return err + } + _q.sql = prev + } + return nil +} + +func (_q *IdempotencyRecordQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*IdempotencyRecord, error) { + var ( + nodes = []*IdempotencyRecord{} + _spec = _q.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*IdempotencyRecord).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &IdempotencyRecord{config: _q.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, _q.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (_q *IdempotencyRecordQuery) sqlCount(ctx context.Context) (int, error) { + _spec := _q.querySpec() + if len(_q.modifiers) > 0 { + _spec.Modifiers = _q.modifiers + } + _spec.Node.Columns = _q.ctx.Fields + if len(_q.ctx.Fields) > 0 { + _spec.Unique = _q.ctx.Unique != nil && *_q.ctx.Unique + } + return sqlgraph.CountNodes(ctx, _q.driver, _spec) +} + +func (_q *IdempotencyRecordQuery) querySpec() *sqlgraph.QuerySpec { + _spec := 
sqlgraph.NewQuerySpec(idempotencyrecord.Table, idempotencyrecord.Columns, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64)) + _spec.From = _q.sql + if unique := _q.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if _q.path != nil { + _spec.Unique = true + } + if fields := _q.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, idempotencyrecord.FieldID) + for i := range fields { + if fields[i] != idempotencyrecord.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := _q.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := _q.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := _q.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := _q.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (_q *IdempotencyRecordQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(_q.driver.Dialect()) + t1 := builder.Table(idempotencyrecord.Table) + columns := _q.ctx.Fields + if len(columns) == 0 { + columns = idempotencyrecord.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if _q.sql != nil { + selector = _q.sql + selector.Select(selector.Columns(columns...)...) + } + if _q.ctx.Unique != nil && *_q.ctx.Unique { + selector.Distinct() + } + for _, m := range _q.modifiers { + m(selector) + } + for _, p := range _q.predicates { + p(selector) + } + for _, p := range _q.order { + p(selector) + } + if offset := _q.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. 
+ selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := _q.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ForUpdate locks the selected rows against concurrent updates, and prevent them from being +// updated, deleted or "selected ... for update" by other sessions, until the transaction is +// either committed or rolled-back. +func (_q *IdempotencyRecordQuery) ForUpdate(opts ...sql.LockOption) *IdempotencyRecordQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForUpdate(opts...) + }) + return _q +} + +// ForShare behaves similarly to ForUpdate, except that it acquires a shared mode lock +// on any rows that are read. Other sessions can read the rows, but cannot modify them +// until your transaction commits. +func (_q *IdempotencyRecordQuery) ForShare(opts ...sql.LockOption) *IdempotencyRecordQuery { + if _q.driver.Dialect() == dialect.Postgres { + _q.Unique(false) + } + _q.modifiers = append(_q.modifiers, func(s *sql.Selector) { + s.ForShare(opts...) + }) + return _q +} + +// IdempotencyRecordGroupBy is the group-by builder for IdempotencyRecord entities. +type IdempotencyRecordGroupBy struct { + selector + build *IdempotencyRecordQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (_g *IdempotencyRecordGroupBy) Aggregate(fns ...AggregateFunc) *IdempotencyRecordGroupBy { + _g.fns = append(_g.fns, fns...) + return _g +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_g *IdempotencyRecordGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _g.build.ctx, ent.OpQueryGroupBy) + if err := _g.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*IdempotencyRecordQuery, *IdempotencyRecordGroupBy](ctx, _g.build, _g, _g.build.inters, v) +} + +func (_g *IdempotencyRecordGroupBy) sqlScan(ctx context.Context, root *IdempotencyRecordQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(_g.fns)) + for _, fn := range _g.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*_g.flds)+len(_g.fns)) + for _, f := range *_g.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*_g.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _g.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// IdempotencyRecordSelect is the builder for selecting fields of IdempotencyRecord entities. +type IdempotencyRecordSelect struct { + *IdempotencyRecordQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (_s *IdempotencyRecordSelect) Aggregate(fns ...AggregateFunc) *IdempotencyRecordSelect { + _s.fns = append(_s.fns, fns...) + return _s +} + +// Scan applies the selector query and scans the result into the given value. 
+func (_s *IdempotencyRecordSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, _s.ctx, ent.OpQuerySelect) + if err := _s.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*IdempotencyRecordQuery, *IdempotencyRecordSelect](ctx, _s.IdempotencyRecordQuery, _s, _s.inters, v) +} + +func (_s *IdempotencyRecordSelect) sqlScan(ctx context.Context, root *IdempotencyRecordQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(_s.fns)) + for _, fn := range _s.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*_s.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := _s.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/backend/ent/idempotencyrecord_update.go b/backend/ent/idempotencyrecord_update.go new file mode 100644 index 000000000..f839e5c01 --- /dev/null +++ b/backend/ent/idempotencyrecord_update.go @@ -0,0 +1,676 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" + "github.com/Wei-Shaw/sub2api/ent/predicate" +) + +// IdempotencyRecordUpdate is the builder for updating IdempotencyRecord entities. +type IdempotencyRecordUpdate struct { + config + hooks []Hook + mutation *IdempotencyRecordMutation +} + +// Where appends a list predicates to the IdempotencyRecordUpdate builder. +func (_u *IdempotencyRecordUpdate) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordUpdate { + _u.mutation.Where(ps...) + return _u +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (_u *IdempotencyRecordUpdate) SetUpdatedAt(v time.Time) *IdempotencyRecordUpdate { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetScope sets the "scope" field. +func (_u *IdempotencyRecordUpdate) SetScope(v string) *IdempotencyRecordUpdate { + _u.mutation.SetScope(v) + return _u +} + +// SetNillableScope sets the "scope" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableScope(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetScope(*v) + } + return _u +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. +func (_u *IdempotencyRecordUpdate) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpdate { + _u.mutation.SetIdempotencyKeyHash(v) + return _u +} + +// SetNillableIdempotencyKeyHash sets the "idempotency_key_hash" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableIdempotencyKeyHash(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetIdempotencyKeyHash(*v) + } + return _u +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (_u *IdempotencyRecordUpdate) SetRequestFingerprint(v string) *IdempotencyRecordUpdate { + _u.mutation.SetRequestFingerprint(v) + return _u +} + +// SetNillableRequestFingerprint sets the "request_fingerprint" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableRequestFingerprint(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetRequestFingerprint(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *IdempotencyRecordUpdate) SetStatus(v string) *IdempotencyRecordUpdate { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableStatus(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetResponseStatus sets the "response_status" field. 
+func (_u *IdempotencyRecordUpdate) SetResponseStatus(v int) *IdempotencyRecordUpdate { + _u.mutation.ResetResponseStatus() + _u.mutation.SetResponseStatus(v) + return _u +} + +// SetNillableResponseStatus sets the "response_status" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableResponseStatus(v *int) *IdempotencyRecordUpdate { + if v != nil { + _u.SetResponseStatus(*v) + } + return _u +} + +// AddResponseStatus adds value to the "response_status" field. +func (_u *IdempotencyRecordUpdate) AddResponseStatus(v int) *IdempotencyRecordUpdate { + _u.mutation.AddResponseStatus(v) + return _u +} + +// ClearResponseStatus clears the value of the "response_status" field. +func (_u *IdempotencyRecordUpdate) ClearResponseStatus() *IdempotencyRecordUpdate { + _u.mutation.ClearResponseStatus() + return _u +} + +// SetResponseBody sets the "response_body" field. +func (_u *IdempotencyRecordUpdate) SetResponseBody(v string) *IdempotencyRecordUpdate { + _u.mutation.SetResponseBody(v) + return _u +} + +// SetNillableResponseBody sets the "response_body" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableResponseBody(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetResponseBody(*v) + } + return _u +} + +// ClearResponseBody clears the value of the "response_body" field. +func (_u *IdempotencyRecordUpdate) ClearResponseBody() *IdempotencyRecordUpdate { + _u.mutation.ClearResponseBody() + return _u +} + +// SetErrorReason sets the "error_reason" field. +func (_u *IdempotencyRecordUpdate) SetErrorReason(v string) *IdempotencyRecordUpdate { + _u.mutation.SetErrorReason(v) + return _u +} + +// SetNillableErrorReason sets the "error_reason" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableErrorReason(v *string) *IdempotencyRecordUpdate { + if v != nil { + _u.SetErrorReason(*v) + } + return _u +} + +// ClearErrorReason clears the value of the "error_reason" field. 
+func (_u *IdempotencyRecordUpdate) ClearErrorReason() *IdempotencyRecordUpdate { + _u.mutation.ClearErrorReason() + return _u +} + +// SetLockedUntil sets the "locked_until" field. +func (_u *IdempotencyRecordUpdate) SetLockedUntil(v time.Time) *IdempotencyRecordUpdate { + _u.mutation.SetLockedUntil(v) + return _u +} + +// SetNillableLockedUntil sets the "locked_until" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableLockedUntil(v *time.Time) *IdempotencyRecordUpdate { + if v != nil { + _u.SetLockedUntil(*v) + } + return _u +} + +// ClearLockedUntil clears the value of the "locked_until" field. +func (_u *IdempotencyRecordUpdate) ClearLockedUntil() *IdempotencyRecordUpdate { + _u.mutation.ClearLockedUntil() + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *IdempotencyRecordUpdate) SetExpiresAt(v time.Time) *IdempotencyRecordUpdate { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *IdempotencyRecordUpdate) SetNillableExpiresAt(v *time.Time) *IdempotencyRecordUpdate { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// Mutation returns the IdempotencyRecordMutation object of the builder. +func (_u *IdempotencyRecordUpdate) Mutation() *IdempotencyRecordMutation { + return _u.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (_u *IdempotencyRecordUpdate) Save(ctx context.Context) (int, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *IdempotencyRecordUpdate) SaveX(ctx context.Context) int { + affected, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. 
+func (_u *IdempotencyRecordUpdate) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (_u *IdempotencyRecordUpdate) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *IdempotencyRecordUpdate) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := idempotencyrecord.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *IdempotencyRecordUpdate) check() error { + if v, ok := _u.mutation.Scope(); ok { + if err := idempotencyrecord.ScopeValidator(v); err != nil { + return &ValidationError{Name: "scope", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.scope": %w`, err)} + } + } + if v, ok := _u.mutation.IdempotencyKeyHash(); ok { + if err := idempotencyrecord.IdempotencyKeyHashValidator(v); err != nil { + return &ValidationError{Name: "idempotency_key_hash", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.idempotency_key_hash": %w`, err)} + } + } + if v, ok := _u.mutation.RequestFingerprint(); ok { + if err := idempotencyrecord.RequestFingerprintValidator(v); err != nil { + return &ValidationError{Name: "request_fingerprint", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.request_fingerprint": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := idempotencyrecord.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.status": %w`, err)} + } + } + if v, ok := _u.mutation.ErrorReason(); ok { + if err := idempotencyrecord.ErrorReasonValidator(v); err != nil { + return &ValidationError{Name: "error_reason", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.error_reason": %w`, err)} + } + } + 
return nil +} + +func (_u *IdempotencyRecordUpdate) sqlSave(ctx context.Context) (_node int, err error) { + if err := _u.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(idempotencyrecord.Table, idempotencyrecord.Columns, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64)) + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(idempotencyrecord.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Scope(); ok { + _spec.SetField(idempotencyrecord.FieldScope, field.TypeString, value) + } + if value, ok := _u.mutation.IdempotencyKeyHash(); ok { + _spec.SetField(idempotencyrecord.FieldIdempotencyKeyHash, field.TypeString, value) + } + if value, ok := _u.mutation.RequestFingerprint(); ok { + _spec.SetField(idempotencyrecord.FieldRequestFingerprint, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(idempotencyrecord.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ResponseStatus(); ok { + _spec.SetField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedResponseStatus(); ok { + _spec.AddField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value) + } + if _u.mutation.ResponseStatusCleared() { + _spec.ClearField(idempotencyrecord.FieldResponseStatus, field.TypeInt) + } + if value, ok := _u.mutation.ResponseBody(); ok { + _spec.SetField(idempotencyrecord.FieldResponseBody, field.TypeString, value) + } + if _u.mutation.ResponseBodyCleared() { + _spec.ClearField(idempotencyrecord.FieldResponseBody, field.TypeString) + } + if value, ok := _u.mutation.ErrorReason(); ok { + _spec.SetField(idempotencyrecord.FieldErrorReason, field.TypeString, value) + } + if _u.mutation.ErrorReasonCleared() { + 
_spec.ClearField(idempotencyrecord.FieldErrorReason, field.TypeString) + } + if value, ok := _u.mutation.LockedUntil(); ok { + _spec.SetField(idempotencyrecord.FieldLockedUntil, field.TypeTime, value) + } + if _u.mutation.LockedUntilCleared() { + _spec.ClearField(idempotencyrecord.FieldLockedUntil, field.TypeTime) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(idempotencyrecord.FieldExpiresAt, field.TypeTime, value) + } + if _node, err = sqlgraph.UpdateNodes(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{idempotencyrecord.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + _u.mutation.done = true + return _node, nil +} + +// IdempotencyRecordUpdateOne is the builder for updating a single IdempotencyRecord entity. +type IdempotencyRecordUpdateOne struct { + config + fields []string + hooks []Hook + mutation *IdempotencyRecordMutation +} + +// SetUpdatedAt sets the "updated_at" field. +func (_u *IdempotencyRecordUpdateOne) SetUpdatedAt(v time.Time) *IdempotencyRecordUpdateOne { + _u.mutation.SetUpdatedAt(v) + return _u +} + +// SetScope sets the "scope" field. +func (_u *IdempotencyRecordUpdateOne) SetScope(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetScope(v) + return _u +} + +// SetNillableScope sets the "scope" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableScope(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetScope(*v) + } + return _u +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. +func (_u *IdempotencyRecordUpdateOne) SetIdempotencyKeyHash(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetIdempotencyKeyHash(v) + return _u +} + +// SetNillableIdempotencyKeyHash sets the "idempotency_key_hash" field if the given value is not nil. 
+func (_u *IdempotencyRecordUpdateOne) SetNillableIdempotencyKeyHash(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetIdempotencyKeyHash(*v) + } + return _u +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (_u *IdempotencyRecordUpdateOne) SetRequestFingerprint(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetRequestFingerprint(v) + return _u +} + +// SetNillableRequestFingerprint sets the "request_fingerprint" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableRequestFingerprint(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetRequestFingerprint(*v) + } + return _u +} + +// SetStatus sets the "status" field. +func (_u *IdempotencyRecordUpdateOne) SetStatus(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetStatus(v) + return _u +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableStatus(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetStatus(*v) + } + return _u +} + +// SetResponseStatus sets the "response_status" field. +func (_u *IdempotencyRecordUpdateOne) SetResponseStatus(v int) *IdempotencyRecordUpdateOne { + _u.mutation.ResetResponseStatus() + _u.mutation.SetResponseStatus(v) + return _u +} + +// SetNillableResponseStatus sets the "response_status" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableResponseStatus(v *int) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetResponseStatus(*v) + } + return _u +} + +// AddResponseStatus adds value to the "response_status" field. +func (_u *IdempotencyRecordUpdateOne) AddResponseStatus(v int) *IdempotencyRecordUpdateOne { + _u.mutation.AddResponseStatus(v) + return _u +} + +// ClearResponseStatus clears the value of the "response_status" field. 
+func (_u *IdempotencyRecordUpdateOne) ClearResponseStatus() *IdempotencyRecordUpdateOne { + _u.mutation.ClearResponseStatus() + return _u +} + +// SetResponseBody sets the "response_body" field. +func (_u *IdempotencyRecordUpdateOne) SetResponseBody(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetResponseBody(v) + return _u +} + +// SetNillableResponseBody sets the "response_body" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableResponseBody(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetResponseBody(*v) + } + return _u +} + +// ClearResponseBody clears the value of the "response_body" field. +func (_u *IdempotencyRecordUpdateOne) ClearResponseBody() *IdempotencyRecordUpdateOne { + _u.mutation.ClearResponseBody() + return _u +} + +// SetErrorReason sets the "error_reason" field. +func (_u *IdempotencyRecordUpdateOne) SetErrorReason(v string) *IdempotencyRecordUpdateOne { + _u.mutation.SetErrorReason(v) + return _u +} + +// SetNillableErrorReason sets the "error_reason" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableErrorReason(v *string) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetErrorReason(*v) + } + return _u +} + +// ClearErrorReason clears the value of the "error_reason" field. +func (_u *IdempotencyRecordUpdateOne) ClearErrorReason() *IdempotencyRecordUpdateOne { + _u.mutation.ClearErrorReason() + return _u +} + +// SetLockedUntil sets the "locked_until" field. +func (_u *IdempotencyRecordUpdateOne) SetLockedUntil(v time.Time) *IdempotencyRecordUpdateOne { + _u.mutation.SetLockedUntil(v) + return _u +} + +// SetNillableLockedUntil sets the "locked_until" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableLockedUntil(v *time.Time) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetLockedUntil(*v) + } + return _u +} + +// ClearLockedUntil clears the value of the "locked_until" field. 
+func (_u *IdempotencyRecordUpdateOne) ClearLockedUntil() *IdempotencyRecordUpdateOne { + _u.mutation.ClearLockedUntil() + return _u +} + +// SetExpiresAt sets the "expires_at" field. +func (_u *IdempotencyRecordUpdateOne) SetExpiresAt(v time.Time) *IdempotencyRecordUpdateOne { + _u.mutation.SetExpiresAt(v) + return _u +} + +// SetNillableExpiresAt sets the "expires_at" field if the given value is not nil. +func (_u *IdempotencyRecordUpdateOne) SetNillableExpiresAt(v *time.Time) *IdempotencyRecordUpdateOne { + if v != nil { + _u.SetExpiresAt(*v) + } + return _u +} + +// Mutation returns the IdempotencyRecordMutation object of the builder. +func (_u *IdempotencyRecordUpdateOne) Mutation() *IdempotencyRecordMutation { + return _u.mutation +} + +// Where appends a list predicates to the IdempotencyRecordUpdate builder. +func (_u *IdempotencyRecordUpdateOne) Where(ps ...predicate.IdempotencyRecord) *IdempotencyRecordUpdateOne { + _u.mutation.Where(ps...) + return _u +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (_u *IdempotencyRecordUpdateOne) Select(field string, fields ...string) *IdempotencyRecordUpdateOne { + _u.fields = append([]string{field}, fields...) + return _u +} + +// Save executes the query and returns the updated IdempotencyRecord entity. +func (_u *IdempotencyRecordUpdateOne) Save(ctx context.Context) (*IdempotencyRecord, error) { + _u.defaults() + return withHooks(ctx, _u.sqlSave, _u.mutation, _u.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (_u *IdempotencyRecordUpdateOne) SaveX(ctx context.Context) *IdempotencyRecord { + node, err := _u.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (_u *IdempotencyRecordUpdateOne) Exec(ctx context.Context) error { + _, err := _u.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (_u *IdempotencyRecordUpdateOne) ExecX(ctx context.Context) { + if err := _u.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (_u *IdempotencyRecordUpdateOne) defaults() { + if _, ok := _u.mutation.UpdatedAt(); !ok { + v := idempotencyrecord.UpdateDefaultUpdatedAt() + _u.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (_u *IdempotencyRecordUpdateOne) check() error { + if v, ok := _u.mutation.Scope(); ok { + if err := idempotencyrecord.ScopeValidator(v); err != nil { + return &ValidationError{Name: "scope", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.scope": %w`, err)} + } + } + if v, ok := _u.mutation.IdempotencyKeyHash(); ok { + if err := idempotencyrecord.IdempotencyKeyHashValidator(v); err != nil { + return &ValidationError{Name: "idempotency_key_hash", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.idempotency_key_hash": %w`, err)} + } + } + if v, ok := _u.mutation.RequestFingerprint(); ok { + if err := idempotencyrecord.RequestFingerprintValidator(v); err != nil { + return &ValidationError{Name: "request_fingerprint", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.request_fingerprint": %w`, err)} + } + } + if v, ok := _u.mutation.Status(); ok { + if err := idempotencyrecord.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.status": %w`, err)} + } + } + if v, ok := _u.mutation.ErrorReason(); ok { + if err := idempotencyrecord.ErrorReasonValidator(v); err != nil { + return &ValidationError{Name: "error_reason", err: fmt.Errorf(`ent: validator failed for field "IdempotencyRecord.error_reason": %w`, err)} + } + } + return nil +} + +func (_u *IdempotencyRecordUpdateOne) sqlSave(ctx context.Context) (_node *IdempotencyRecord, err error) { + if err := _u.check(); err != nil { + 
return _node, err + } + _spec := sqlgraph.NewUpdateSpec(idempotencyrecord.Table, idempotencyrecord.Columns, sqlgraph.NewFieldSpec(idempotencyrecord.FieldID, field.TypeInt64)) + id, ok := _u.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "IdempotencyRecord.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := _u.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, idempotencyrecord.FieldID) + for _, f := range fields { + if !idempotencyrecord.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != idempotencyrecord.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := _u.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := _u.mutation.UpdatedAt(); ok { + _spec.SetField(idempotencyrecord.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := _u.mutation.Scope(); ok { + _spec.SetField(idempotencyrecord.FieldScope, field.TypeString, value) + } + if value, ok := _u.mutation.IdempotencyKeyHash(); ok { + _spec.SetField(idempotencyrecord.FieldIdempotencyKeyHash, field.TypeString, value) + } + if value, ok := _u.mutation.RequestFingerprint(); ok { + _spec.SetField(idempotencyrecord.FieldRequestFingerprint, field.TypeString, value) + } + if value, ok := _u.mutation.Status(); ok { + _spec.SetField(idempotencyrecord.FieldStatus, field.TypeString, value) + } + if value, ok := _u.mutation.ResponseStatus(); ok { + _spec.SetField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value) + } + if value, ok := _u.mutation.AddedResponseStatus(); ok { + _spec.AddField(idempotencyrecord.FieldResponseStatus, field.TypeInt, value) + } + if _u.mutation.ResponseStatusCleared() { + _spec.ClearField(idempotencyrecord.FieldResponseStatus, 
field.TypeInt) + } + if value, ok := _u.mutation.ResponseBody(); ok { + _spec.SetField(idempotencyrecord.FieldResponseBody, field.TypeString, value) + } + if _u.mutation.ResponseBodyCleared() { + _spec.ClearField(idempotencyrecord.FieldResponseBody, field.TypeString) + } + if value, ok := _u.mutation.ErrorReason(); ok { + _spec.SetField(idempotencyrecord.FieldErrorReason, field.TypeString, value) + } + if _u.mutation.ErrorReasonCleared() { + _spec.ClearField(idempotencyrecord.FieldErrorReason, field.TypeString) + } + if value, ok := _u.mutation.LockedUntil(); ok { + _spec.SetField(idempotencyrecord.FieldLockedUntil, field.TypeTime, value) + } + if _u.mutation.LockedUntilCleared() { + _spec.ClearField(idempotencyrecord.FieldLockedUntil, field.TypeTime) + } + if value, ok := _u.mutation.ExpiresAt(); ok { + _spec.SetField(idempotencyrecord.FieldExpiresAt, field.TypeTime, value) + } + _node = &IdempotencyRecord{config: _u.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, _u.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{idempotencyrecord.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + _u.mutation.done = true + return _node, nil +} diff --git a/backend/ent/intercept/intercept.go b/backend/ent/intercept/intercept.go index 290fb163b..e77464026 100644 --- a/backend/ent/intercept/intercept.go +++ b/backend/ent/intercept/intercept.go @@ -15,6 +15,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" "github.com/Wei-Shaw/sub2api/ent/predicate" "github.com/Wei-Shaw/sub2api/ent/promocode" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" @@ -276,6 +277,33 @@ func (f TraverseGroup) Traverse(ctx context.Context, q ent.Query) 
error { return fmt.Errorf("unexpected query type %T. expect *ent.GroupQuery", q) } +// The IdempotencyRecordFunc type is an adapter to allow the use of ordinary function as a Querier. +type IdempotencyRecordFunc func(context.Context, *ent.IdempotencyRecordQuery) (ent.Value, error) + +// Query calls f(ctx, q). +func (f IdempotencyRecordFunc) Query(ctx context.Context, q ent.Query) (ent.Value, error) { + if q, ok := q.(*ent.IdempotencyRecordQuery); ok { + return f(ctx, q) + } + return nil, fmt.Errorf("unexpected query type %T. expect *ent.IdempotencyRecordQuery", q) +} + +// The TraverseIdempotencyRecord type is an adapter to allow the use of ordinary function as Traverser. +type TraverseIdempotencyRecord func(context.Context, *ent.IdempotencyRecordQuery) error + +// Intercept is a dummy implementation of Intercept that returns the next Querier in the pipeline. +func (f TraverseIdempotencyRecord) Intercept(next ent.Querier) ent.Querier { + return next +} + +// Traverse calls f(ctx, q). +func (f TraverseIdempotencyRecord) Traverse(ctx context.Context, q ent.Query) error { + if q, ok := q.(*ent.IdempotencyRecordQuery); ok { + return f(ctx, q) + } + return fmt.Errorf("unexpected query type %T. expect *ent.IdempotencyRecordQuery", q) +} + // The PromoCodeFunc type is an adapter to allow the use of ordinary function as a Querier. 
type PromoCodeFunc func(context.Context, *ent.PromoCodeQuery) (ent.Value, error) @@ -644,6 +672,8 @@ func NewQuery(q ent.Query) (Query, error) { return &query[*ent.ErrorPassthroughRuleQuery, predicate.ErrorPassthroughRule, errorpassthroughrule.OrderOption]{typ: ent.TypeErrorPassthroughRule, tq: q}, nil case *ent.GroupQuery: return &query[*ent.GroupQuery, predicate.Group, group.OrderOption]{typ: ent.TypeGroup, tq: q}, nil + case *ent.IdempotencyRecordQuery: + return &query[*ent.IdempotencyRecordQuery, predicate.IdempotencyRecord, idempotencyrecord.OrderOption]{typ: ent.TypeIdempotencyRecord, tq: q}, nil case *ent.PromoCodeQuery: return &query[*ent.PromoCodeQuery, predicate.PromoCode, promocode.OrderOption]{typ: ent.TypePromoCode, tq: q}, nil case *ent.PromoCodeUsageQuery: diff --git a/backend/ent/migrate/schema.go b/backend/ent/migrate/schema.go index aba00d4f3..d5afba8c3 100644 --- a/backend/ent/migrate/schema.go +++ b/backend/ent/migrate/schema.go @@ -108,6 +108,8 @@ var ( {Name: "rate_limited_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, {Name: "rate_limit_reset_at", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, {Name: "overload_until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "temp_unschedulable_until", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "temp_unschedulable_reason", Type: field.TypeString, Nullable: true, SchemaType: map[string]string{"postgres": "text"}}, {Name: "session_window_start", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, {Name: "session_window_end", Type: field.TypeTime, Nullable: true, SchemaType: map[string]string{"postgres": "timestamptz"}}, {Name: "session_window_status", Type: field.TypeString, Nullable: true, Size: 20}, @@ -121,7 +123,7 @@ var ( 
ForeignKeys: []*schema.ForeignKey{ { Symbol: "accounts_proxies_proxy", - Columns: []*schema.Column{AccountsColumns[25]}, + Columns: []*schema.Column{AccountsColumns[27]}, RefColumns: []*schema.Column{ProxiesColumns[0]}, OnDelete: schema.SetNull, }, @@ -145,7 +147,7 @@ var ( { Name: "account_proxy_id", Unique: false, - Columns: []*schema.Column{AccountsColumns[25]}, + Columns: []*schema.Column{AccountsColumns[27]}, }, { Name: "account_priority", @@ -177,6 +179,16 @@ var ( Unique: false, Columns: []*schema.Column{AccountsColumns[21]}, }, + { + Name: "account_platform_priority", + Unique: false, + Columns: []*schema.Column{AccountsColumns[6], AccountsColumns[11]}, + }, + { + Name: "account_priority_status", + Unique: false, + Columns: []*schema.Column{AccountsColumns[11], AccountsColumns[13]}, + }, { Name: "account_deleted_at", Unique: false, @@ -423,6 +435,44 @@ var ( }, }, } + // IdempotencyRecordsColumns holds the columns for the "idempotency_records" table. + IdempotencyRecordsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt64, Increment: true}, + {Name: "created_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "updated_at", Type: field.TypeTime, SchemaType: map[string]string{"postgres": "timestamptz"}}, + {Name: "scope", Type: field.TypeString, Size: 128}, + {Name: "idempotency_key_hash", Type: field.TypeString, Size: 64}, + {Name: "request_fingerprint", Type: field.TypeString, Size: 64}, + {Name: "status", Type: field.TypeString, Size: 32}, + {Name: "response_status", Type: field.TypeInt, Nullable: true}, + {Name: "response_body", Type: field.TypeString, Nullable: true}, + {Name: "error_reason", Type: field.TypeString, Nullable: true, Size: 128}, + {Name: "locked_until", Type: field.TypeTime, Nullable: true}, + {Name: "expires_at", Type: field.TypeTime}, + } + // IdempotencyRecordsTable holds the schema information for the "idempotency_records" table. 
+ IdempotencyRecordsTable = &schema.Table{ + Name: "idempotency_records", + Columns: IdempotencyRecordsColumns, + PrimaryKey: []*schema.Column{IdempotencyRecordsColumns[0]}, + Indexes: []*schema.Index{ + { + Name: "idempotencyrecord_scope_idempotency_key_hash", + Unique: true, + Columns: []*schema.Column{IdempotencyRecordsColumns[3], IdempotencyRecordsColumns[4]}, + }, + { + Name: "idempotencyrecord_expires_at", + Unique: false, + Columns: []*schema.Column{IdempotencyRecordsColumns[11]}, + }, + { + Name: "idempotencyrecord_status_locked_until", + Unique: false, + Columns: []*schema.Column{IdempotencyRecordsColumns[6], IdempotencyRecordsColumns[10]}, + }, + }, + } // PromoCodesColumns holds the columns for the "promo_codes" table. PromoCodesColumns = []*schema.Column{ {Name: "id", Type: field.TypeInt64, Increment: true}, @@ -771,6 +821,11 @@ var ( Unique: false, Columns: []*schema.Column{UsageLogsColumns[28], UsageLogsColumns[27]}, }, + { + Name: "usagelog_group_id_created_at", + Unique: false, + Columns: []*schema.Column{UsageLogsColumns[30], UsageLogsColumns[27]}, + }, }, } // UsersColumns holds the columns for the "users" table. 
@@ -995,6 +1050,11 @@ var ( Unique: false, Columns: []*schema.Column{UserSubscriptionsColumns[5]}, }, + { + Name: "usersubscription_user_id_status_expires_at", + Unique: false, + Columns: []*schema.Column{UserSubscriptionsColumns[16], UserSubscriptionsColumns[6], UserSubscriptionsColumns[5]}, + }, { Name: "usersubscription_assigned_by", Unique: false, @@ -1021,6 +1081,7 @@ var ( AnnouncementReadsTable, ErrorPassthroughRulesTable, GroupsTable, + IdempotencyRecordsTable, PromoCodesTable, PromoCodeUsagesTable, ProxiesTable, @@ -1066,6 +1127,9 @@ func init() { GroupsTable.Annotation = &entsql.Annotation{ Table: "groups", } + IdempotencyRecordsTable.Annotation = &entsql.Annotation{ + Table: "idempotency_records", + } PromoCodesTable.Annotation = &entsql.Annotation{ Table: "promo_codes", } diff --git a/backend/ent/mutation.go b/backend/ent/mutation.go index 7d5bf180d..4c8aca45c 100644 --- a/backend/ent/mutation.go +++ b/backend/ent/mutation.go @@ -19,6 +19,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" "github.com/Wei-Shaw/sub2api/ent/predicate" "github.com/Wei-Shaw/sub2api/ent/promocode" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" @@ -52,6 +53,7 @@ const ( TypeAnnouncementRead = "AnnouncementRead" TypeErrorPassthroughRule = "ErrorPassthroughRule" TypeGroup = "Group" + TypeIdempotencyRecord = "IdempotencyRecord" TypePromoCode = "PromoCode" TypePromoCodeUsage = "PromoCodeUsage" TypeProxy = "Proxy" @@ -1503,48 +1505,50 @@ func (m *APIKeyMutation) ResetEdge(name string) error { // AccountMutation represents an operation that mutates the Account nodes in the graph. 
type AccountMutation struct { config - op Op - typ string - id *int64 - created_at *time.Time - updated_at *time.Time - deleted_at *time.Time - name *string - notes *string - platform *string - _type *string - credentials *map[string]interface{} - extra *map[string]interface{} - concurrency *int - addconcurrency *int - priority *int - addpriority *int - rate_multiplier *float64 - addrate_multiplier *float64 - status *string - error_message *string - last_used_at *time.Time - expires_at *time.Time - auto_pause_on_expired *bool - schedulable *bool - rate_limited_at *time.Time - rate_limit_reset_at *time.Time - overload_until *time.Time - session_window_start *time.Time - session_window_end *time.Time - session_window_status *string - clearedFields map[string]struct{} - groups map[int64]struct{} - removedgroups map[int64]struct{} - clearedgroups bool - proxy *int64 - clearedproxy bool - usage_logs map[int64]struct{} - removedusage_logs map[int64]struct{} - clearedusage_logs bool - done bool - oldValue func(context.Context) (*Account, error) - predicates []predicate.Account + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + deleted_at *time.Time + name *string + notes *string + platform *string + _type *string + credentials *map[string]interface{} + extra *map[string]interface{} + concurrency *int + addconcurrency *int + priority *int + addpriority *int + rate_multiplier *float64 + addrate_multiplier *float64 + status *string + error_message *string + last_used_at *time.Time + expires_at *time.Time + auto_pause_on_expired *bool + schedulable *bool + rate_limited_at *time.Time + rate_limit_reset_at *time.Time + overload_until *time.Time + temp_unschedulable_until *time.Time + temp_unschedulable_reason *string + session_window_start *time.Time + session_window_end *time.Time + session_window_status *string + clearedFields map[string]struct{} + groups map[int64]struct{} + removedgroups map[int64]struct{} + clearedgroups bool + proxy *int64 
+ clearedproxy bool + usage_logs map[int64]struct{} + removedusage_logs map[int64]struct{} + clearedusage_logs bool + done bool + oldValue func(context.Context) (*Account, error) + predicates []predicate.Account } var _ ent.Mutation = (*AccountMutation)(nil) @@ -2614,6 +2618,104 @@ func (m *AccountMutation) ResetOverloadUntil() { delete(m.clearedFields, account.FieldOverloadUntil) } +// SetTempUnschedulableUntil sets the "temp_unschedulable_until" field. +func (m *AccountMutation) SetTempUnschedulableUntil(t time.Time) { + m.temp_unschedulable_until = &t +} + +// TempUnschedulableUntil returns the value of the "temp_unschedulable_until" field in the mutation. +func (m *AccountMutation) TempUnschedulableUntil() (r time.Time, exists bool) { + v := m.temp_unschedulable_until + if v == nil { + return + } + return *v, true +} + +// OldTempUnschedulableUntil returns the old "temp_unschedulable_until" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AccountMutation) OldTempUnschedulableUntil(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTempUnschedulableUntil is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTempUnschedulableUntil requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTempUnschedulableUntil: %w", err) + } + return oldValue.TempUnschedulableUntil, nil +} + +// ClearTempUnschedulableUntil clears the value of the "temp_unschedulable_until" field. 
+func (m *AccountMutation) ClearTempUnschedulableUntil() { + m.temp_unschedulable_until = nil + m.clearedFields[account.FieldTempUnschedulableUntil] = struct{}{} +} + +// TempUnschedulableUntilCleared returns if the "temp_unschedulable_until" field was cleared in this mutation. +func (m *AccountMutation) TempUnschedulableUntilCleared() bool { + _, ok := m.clearedFields[account.FieldTempUnschedulableUntil] + return ok +} + +// ResetTempUnschedulableUntil resets all changes to the "temp_unschedulable_until" field. +func (m *AccountMutation) ResetTempUnschedulableUntil() { + m.temp_unschedulable_until = nil + delete(m.clearedFields, account.FieldTempUnschedulableUntil) +} + +// SetTempUnschedulableReason sets the "temp_unschedulable_reason" field. +func (m *AccountMutation) SetTempUnschedulableReason(s string) { + m.temp_unschedulable_reason = &s +} + +// TempUnschedulableReason returns the value of the "temp_unschedulable_reason" field in the mutation. +func (m *AccountMutation) TempUnschedulableReason() (r string, exists bool) { + v := m.temp_unschedulable_reason + if v == nil { + return + } + return *v, true +} + +// OldTempUnschedulableReason returns the old "temp_unschedulable_reason" field's value of the Account entity. +// If the Account object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AccountMutation) OldTempUnschedulableReason(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldTempUnschedulableReason is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldTempUnschedulableReason requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldTempUnschedulableReason: %w", err) + } + return oldValue.TempUnschedulableReason, nil +} + +// ClearTempUnschedulableReason clears the value of the "temp_unschedulable_reason" field. +func (m *AccountMutation) ClearTempUnschedulableReason() { + m.temp_unschedulable_reason = nil + m.clearedFields[account.FieldTempUnschedulableReason] = struct{}{} +} + +// TempUnschedulableReasonCleared returns if the "temp_unschedulable_reason" field was cleared in this mutation. +func (m *AccountMutation) TempUnschedulableReasonCleared() bool { + _, ok := m.clearedFields[account.FieldTempUnschedulableReason] + return ok +} + +// ResetTempUnschedulableReason resets all changes to the "temp_unschedulable_reason" field. +func (m *AccountMutation) ResetTempUnschedulableReason() { + m.temp_unschedulable_reason = nil + delete(m.clearedFields, account.FieldTempUnschedulableReason) +} + // SetSessionWindowStart sets the "session_window_start" field. func (m *AccountMutation) SetSessionWindowStart(t time.Time) { m.session_window_start = &t @@ -2930,7 +3032,7 @@ func (m *AccountMutation) Type() string { // order to get all numeric fields that were incremented/decremented, call // AddedFields(). 
func (m *AccountMutation) Fields() []string { - fields := make([]string, 0, 25) + fields := make([]string, 0, 27) if m.created_at != nil { fields = append(fields, account.FieldCreatedAt) } @@ -2997,6 +3099,12 @@ func (m *AccountMutation) Fields() []string { if m.overload_until != nil { fields = append(fields, account.FieldOverloadUntil) } + if m.temp_unschedulable_until != nil { + fields = append(fields, account.FieldTempUnschedulableUntil) + } + if m.temp_unschedulable_reason != nil { + fields = append(fields, account.FieldTempUnschedulableReason) + } if m.session_window_start != nil { fields = append(fields, account.FieldSessionWindowStart) } @@ -3058,6 +3166,10 @@ func (m *AccountMutation) Field(name string) (ent.Value, bool) { return m.RateLimitResetAt() case account.FieldOverloadUntil: return m.OverloadUntil() + case account.FieldTempUnschedulableUntil: + return m.TempUnschedulableUntil() + case account.FieldTempUnschedulableReason: + return m.TempUnschedulableReason() case account.FieldSessionWindowStart: return m.SessionWindowStart() case account.FieldSessionWindowEnd: @@ -3117,6 +3229,10 @@ func (m *AccountMutation) OldField(ctx context.Context, name string) (ent.Value, return m.OldRateLimitResetAt(ctx) case account.FieldOverloadUntil: return m.OldOverloadUntil(ctx) + case account.FieldTempUnschedulableUntil: + return m.OldTempUnschedulableUntil(ctx) + case account.FieldTempUnschedulableReason: + return m.OldTempUnschedulableReason(ctx) case account.FieldSessionWindowStart: return m.OldSessionWindowStart(ctx) case account.FieldSessionWindowEnd: @@ -3286,6 +3402,20 @@ func (m *AccountMutation) SetField(name string, value ent.Value) error { } m.SetOverloadUntil(v) return nil + case account.FieldTempUnschedulableUntil: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTempUnschedulableUntil(v) + return nil + case account.FieldTempUnschedulableReason: + v, ok := value.(string) + if !ok { + 
return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetTempUnschedulableReason(v) + return nil case account.FieldSessionWindowStart: v, ok := value.(time.Time) if !ok { @@ -3403,6 +3533,12 @@ func (m *AccountMutation) ClearedFields() []string { if m.FieldCleared(account.FieldOverloadUntil) { fields = append(fields, account.FieldOverloadUntil) } + if m.FieldCleared(account.FieldTempUnschedulableUntil) { + fields = append(fields, account.FieldTempUnschedulableUntil) + } + if m.FieldCleared(account.FieldTempUnschedulableReason) { + fields = append(fields, account.FieldTempUnschedulableReason) + } if m.FieldCleared(account.FieldSessionWindowStart) { fields = append(fields, account.FieldSessionWindowStart) } @@ -3453,6 +3589,12 @@ func (m *AccountMutation) ClearField(name string) error { case account.FieldOverloadUntil: m.ClearOverloadUntil() return nil + case account.FieldTempUnschedulableUntil: + m.ClearTempUnschedulableUntil() + return nil + case account.FieldTempUnschedulableReason: + m.ClearTempUnschedulableReason() + return nil case account.FieldSessionWindowStart: m.ClearSessionWindowStart() return nil @@ -3536,6 +3678,12 @@ func (m *AccountMutation) ResetField(name string) error { case account.FieldOverloadUntil: m.ResetOverloadUntil() return nil + case account.FieldTempUnschedulableUntil: + m.ResetTempUnschedulableUntil() + return nil + case account.FieldTempUnschedulableReason: + m.ResetTempUnschedulableReason() + return nil case account.FieldSessionWindowStart: m.ResetSessionWindowStart() return nil @@ -10307,6 +10455,988 @@ func (m *GroupMutation) ResetEdge(name string) error { return fmt.Errorf("unknown Group edge %s", name) } +// IdempotencyRecordMutation represents an operation that mutates the IdempotencyRecord nodes in the graph. 
+type IdempotencyRecordMutation struct { + config + op Op + typ string + id *int64 + created_at *time.Time + updated_at *time.Time + scope *string + idempotency_key_hash *string + request_fingerprint *string + status *string + response_status *int + addresponse_status *int + response_body *string + error_reason *string + locked_until *time.Time + expires_at *time.Time + clearedFields map[string]struct{} + done bool + oldValue func(context.Context) (*IdempotencyRecord, error) + predicates []predicate.IdempotencyRecord +} + +var _ ent.Mutation = (*IdempotencyRecordMutation)(nil) + +// idempotencyrecordOption allows management of the mutation configuration using functional options. +type idempotencyrecordOption func(*IdempotencyRecordMutation) + +// newIdempotencyRecordMutation creates new mutation for the IdempotencyRecord entity. +func newIdempotencyRecordMutation(c config, op Op, opts ...idempotencyrecordOption) *IdempotencyRecordMutation { + m := &IdempotencyRecordMutation{ + config: c, + op: op, + typ: TypeIdempotencyRecord, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withIdempotencyRecordID sets the ID field of the mutation. +func withIdempotencyRecordID(id int64) idempotencyrecordOption { + return func(m *IdempotencyRecordMutation) { + var ( + err error + once sync.Once + value *IdempotencyRecord + ) + m.oldValue = func(ctx context.Context) (*IdempotencyRecord, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().IdempotencyRecord.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withIdempotencyRecord sets the old IdempotencyRecord of the mutation. 
+func withIdempotencyRecord(node *IdempotencyRecord) idempotencyrecordOption { + return func(m *IdempotencyRecordMutation) { + m.oldValue = func(context.Context) (*IdempotencyRecord, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m IdempotencyRecordMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m IdempotencyRecordMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *IdempotencyRecordMutation) ID() (id int64, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *IdempotencyRecordMutation) IDs(ctx context.Context) ([]int64, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []int64{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().IdempotencyRecord.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetCreatedAt sets the "created_at" field. 
+func (m *IdempotencyRecordMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *IdempotencyRecordMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *IdempotencyRecordMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *IdempotencyRecordMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *IdempotencyRecordMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *IdempotencyRecordMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *IdempotencyRecordMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetScope sets the "scope" field. +func (m *IdempotencyRecordMutation) SetScope(s string) { + m.scope = &s +} + +// Scope returns the value of the "scope" field in the mutation. +func (m *IdempotencyRecordMutation) Scope() (r string, exists bool) { + v := m.scope + if v == nil { + return + } + return *v, true +} + +// OldScope returns the old "scope" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *IdempotencyRecordMutation) OldScope(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldScope is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldScope requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldScope: %w", err) + } + return oldValue.Scope, nil +} + +// ResetScope resets all changes to the "scope" field. +func (m *IdempotencyRecordMutation) ResetScope() { + m.scope = nil +} + +// SetIdempotencyKeyHash sets the "idempotency_key_hash" field. 
+func (m *IdempotencyRecordMutation) SetIdempotencyKeyHash(s string) { + m.idempotency_key_hash = &s +} + +// IdempotencyKeyHash returns the value of the "idempotency_key_hash" field in the mutation. +func (m *IdempotencyRecordMutation) IdempotencyKeyHash() (r string, exists bool) { + v := m.idempotency_key_hash + if v == nil { + return + } + return *v, true +} + +// OldIdempotencyKeyHash returns the old "idempotency_key_hash" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *IdempotencyRecordMutation) OldIdempotencyKeyHash(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIdempotencyKeyHash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIdempotencyKeyHash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIdempotencyKeyHash: %w", err) + } + return oldValue.IdempotencyKeyHash, nil +} + +// ResetIdempotencyKeyHash resets all changes to the "idempotency_key_hash" field. +func (m *IdempotencyRecordMutation) ResetIdempotencyKeyHash() { + m.idempotency_key_hash = nil +} + +// SetRequestFingerprint sets the "request_fingerprint" field. +func (m *IdempotencyRecordMutation) SetRequestFingerprint(s string) { + m.request_fingerprint = &s +} + +// RequestFingerprint returns the value of the "request_fingerprint" field in the mutation. +func (m *IdempotencyRecordMutation) RequestFingerprint() (r string, exists bool) { + v := m.request_fingerprint + if v == nil { + return + } + return *v, true +} + +// OldRequestFingerprint returns the old "request_fingerprint" field's value of the IdempotencyRecord entity. 
+// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *IdempotencyRecordMutation) OldRequestFingerprint(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldRequestFingerprint is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldRequestFingerprint requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldRequestFingerprint: %w", err) + } + return oldValue.RequestFingerprint, nil +} + +// ResetRequestFingerprint resets all changes to the "request_fingerprint" field. +func (m *IdempotencyRecordMutation) ResetRequestFingerprint() { + m.request_fingerprint = nil +} + +// SetStatus sets the "status" field. +func (m *IdempotencyRecordMutation) SetStatus(s string) { + m.status = &s +} + +// Status returns the value of the "status" field in the mutation. +func (m *IdempotencyRecordMutation) Status() (r string, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldStatus(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *IdempotencyRecordMutation) ResetStatus() { + m.status = nil +} + +// SetResponseStatus sets the "response_status" field. +func (m *IdempotencyRecordMutation) SetResponseStatus(i int) { + m.response_status = &i + m.addresponse_status = nil +} + +// ResponseStatus returns the value of the "response_status" field in the mutation. +func (m *IdempotencyRecordMutation) ResponseStatus() (r int, exists bool) { + v := m.response_status + if v == nil { + return + } + return *v, true +} + +// OldResponseStatus returns the old "response_status" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *IdempotencyRecordMutation) OldResponseStatus(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldResponseStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldResponseStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldResponseStatus: %w", err) + } + return oldValue.ResponseStatus, nil +} + +// AddResponseStatus adds i to the "response_status" field. 
+func (m *IdempotencyRecordMutation) AddResponseStatus(i int) { + if m.addresponse_status != nil { + *m.addresponse_status += i + } else { + m.addresponse_status = &i + } +} + +// AddedResponseStatus returns the value that was added to the "response_status" field in this mutation. +func (m *IdempotencyRecordMutation) AddedResponseStatus() (r int, exists bool) { + v := m.addresponse_status + if v == nil { + return + } + return *v, true +} + +// ClearResponseStatus clears the value of the "response_status" field. +func (m *IdempotencyRecordMutation) ClearResponseStatus() { + m.response_status = nil + m.addresponse_status = nil + m.clearedFields[idempotencyrecord.FieldResponseStatus] = struct{}{} +} + +// ResponseStatusCleared returns if the "response_status" field was cleared in this mutation. +func (m *IdempotencyRecordMutation) ResponseStatusCleared() bool { + _, ok := m.clearedFields[idempotencyrecord.FieldResponseStatus] + return ok +} + +// ResetResponseStatus resets all changes to the "response_status" field. +func (m *IdempotencyRecordMutation) ResetResponseStatus() { + m.response_status = nil + m.addresponse_status = nil + delete(m.clearedFields, idempotencyrecord.FieldResponseStatus) +} + +// SetResponseBody sets the "response_body" field. +func (m *IdempotencyRecordMutation) SetResponseBody(s string) { + m.response_body = &s +} + +// ResponseBody returns the value of the "response_body" field in the mutation. +func (m *IdempotencyRecordMutation) ResponseBody() (r string, exists bool) { + v := m.response_body + if v == nil { + return + } + return *v, true +} + +// OldResponseBody returns the old "response_body" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldResponseBody(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldResponseBody is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldResponseBody requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldResponseBody: %w", err) + } + return oldValue.ResponseBody, nil +} + +// ClearResponseBody clears the value of the "response_body" field. +func (m *IdempotencyRecordMutation) ClearResponseBody() { + m.response_body = nil + m.clearedFields[idempotencyrecord.FieldResponseBody] = struct{}{} +} + +// ResponseBodyCleared returns if the "response_body" field was cleared in this mutation. +func (m *IdempotencyRecordMutation) ResponseBodyCleared() bool { + _, ok := m.clearedFields[idempotencyrecord.FieldResponseBody] + return ok +} + +// ResetResponseBody resets all changes to the "response_body" field. +func (m *IdempotencyRecordMutation) ResetResponseBody() { + m.response_body = nil + delete(m.clearedFields, idempotencyrecord.FieldResponseBody) +} + +// SetErrorReason sets the "error_reason" field. +func (m *IdempotencyRecordMutation) SetErrorReason(s string) { + m.error_reason = &s +} + +// ErrorReason returns the value of the "error_reason" field in the mutation. +func (m *IdempotencyRecordMutation) ErrorReason() (r string, exists bool) { + v := m.error_reason + if v == nil { + return + } + return *v, true +} + +// OldErrorReason returns the old "error_reason" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldErrorReason(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldErrorReason is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldErrorReason requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldErrorReason: %w", err) + } + return oldValue.ErrorReason, nil +} + +// ClearErrorReason clears the value of the "error_reason" field. +func (m *IdempotencyRecordMutation) ClearErrorReason() { + m.error_reason = nil + m.clearedFields[idempotencyrecord.FieldErrorReason] = struct{}{} +} + +// ErrorReasonCleared returns if the "error_reason" field was cleared in this mutation. +func (m *IdempotencyRecordMutation) ErrorReasonCleared() bool { + _, ok := m.clearedFields[idempotencyrecord.FieldErrorReason] + return ok +} + +// ResetErrorReason resets all changes to the "error_reason" field. +func (m *IdempotencyRecordMutation) ResetErrorReason() { + m.error_reason = nil + delete(m.clearedFields, idempotencyrecord.FieldErrorReason) +} + +// SetLockedUntil sets the "locked_until" field. +func (m *IdempotencyRecordMutation) SetLockedUntil(t time.Time) { + m.locked_until = &t +} + +// LockedUntil returns the value of the "locked_until" field in the mutation. +func (m *IdempotencyRecordMutation) LockedUntil() (r time.Time, exists bool) { + v := m.locked_until + if v == nil { + return + } + return *v, true +} + +// OldLockedUntil returns the old "locked_until" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldLockedUntil(ctx context.Context) (v *time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldLockedUntil is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldLockedUntil requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldLockedUntil: %w", err) + } + return oldValue.LockedUntil, nil +} + +// ClearLockedUntil clears the value of the "locked_until" field. +func (m *IdempotencyRecordMutation) ClearLockedUntil() { + m.locked_until = nil + m.clearedFields[idempotencyrecord.FieldLockedUntil] = struct{}{} +} + +// LockedUntilCleared returns if the "locked_until" field was cleared in this mutation. +func (m *IdempotencyRecordMutation) LockedUntilCleared() bool { + _, ok := m.clearedFields[idempotencyrecord.FieldLockedUntil] + return ok +} + +// ResetLockedUntil resets all changes to the "locked_until" field. +func (m *IdempotencyRecordMutation) ResetLockedUntil() { + m.locked_until = nil + delete(m.clearedFields, idempotencyrecord.FieldLockedUntil) +} + +// SetExpiresAt sets the "expires_at" field. +func (m *IdempotencyRecordMutation) SetExpiresAt(t time.Time) { + m.expires_at = &t +} + +// ExpiresAt returns the value of the "expires_at" field in the mutation. +func (m *IdempotencyRecordMutation) ExpiresAt() (r time.Time, exists bool) { + v := m.expires_at + if v == nil { + return + } + return *v, true +} + +// OldExpiresAt returns the old "expires_at" field's value of the IdempotencyRecord entity. +// If the IdempotencyRecord object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *IdempotencyRecordMutation) OldExpiresAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExpiresAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExpiresAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExpiresAt: %w", err) + } + return oldValue.ExpiresAt, nil +} + +// ResetExpiresAt resets all changes to the "expires_at" field. +func (m *IdempotencyRecordMutation) ResetExpiresAt() { + m.expires_at = nil +} + +// Where appends a list predicates to the IdempotencyRecordMutation builder. +func (m *IdempotencyRecordMutation) Where(ps ...predicate.IdempotencyRecord) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the IdempotencyRecordMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *IdempotencyRecordMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.IdempotencyRecord, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *IdempotencyRecordMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *IdempotencyRecordMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (IdempotencyRecord). +func (m *IdempotencyRecordMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *IdempotencyRecordMutation) Fields() []string { + fields := make([]string, 0, 11) + if m.created_at != nil { + fields = append(fields, idempotencyrecord.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, idempotencyrecord.FieldUpdatedAt) + } + if m.scope != nil { + fields = append(fields, idempotencyrecord.FieldScope) + } + if m.idempotency_key_hash != nil { + fields = append(fields, idempotencyrecord.FieldIdempotencyKeyHash) + } + if m.request_fingerprint != nil { + fields = append(fields, idempotencyrecord.FieldRequestFingerprint) + } + if m.status != nil { + fields = append(fields, idempotencyrecord.FieldStatus) + } + if m.response_status != nil { + fields = append(fields, idempotencyrecord.FieldResponseStatus) + } + if m.response_body != nil { + fields = append(fields, idempotencyrecord.FieldResponseBody) + } + if m.error_reason != nil { + fields = append(fields, idempotencyrecord.FieldErrorReason) + } + if m.locked_until != nil { + fields = append(fields, idempotencyrecord.FieldLockedUntil) + } + if m.expires_at != nil { + fields = append(fields, idempotencyrecord.FieldExpiresAt) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *IdempotencyRecordMutation) Field(name string) (ent.Value, bool) { + switch name { + case idempotencyrecord.FieldCreatedAt: + return m.CreatedAt() + case idempotencyrecord.FieldUpdatedAt: + return m.UpdatedAt() + case idempotencyrecord.FieldScope: + return m.Scope() + case idempotencyrecord.FieldIdempotencyKeyHash: + return m.IdempotencyKeyHash() + case idempotencyrecord.FieldRequestFingerprint: + return m.RequestFingerprint() + case idempotencyrecord.FieldStatus: + return m.Status() + case idempotencyrecord.FieldResponseStatus: + return m.ResponseStatus() + case idempotencyrecord.FieldResponseBody: + return m.ResponseBody() + case idempotencyrecord.FieldErrorReason: + return m.ErrorReason() + case idempotencyrecord.FieldLockedUntil: + return m.LockedUntil() + case idempotencyrecord.FieldExpiresAt: + return m.ExpiresAt() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *IdempotencyRecordMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case idempotencyrecord.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case idempotencyrecord.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case idempotencyrecord.FieldScope: + return m.OldScope(ctx) + case idempotencyrecord.FieldIdempotencyKeyHash: + return m.OldIdempotencyKeyHash(ctx) + case idempotencyrecord.FieldRequestFingerprint: + return m.OldRequestFingerprint(ctx) + case idempotencyrecord.FieldStatus: + return m.OldStatus(ctx) + case idempotencyrecord.FieldResponseStatus: + return m.OldResponseStatus(ctx) + case idempotencyrecord.FieldResponseBody: + return m.OldResponseBody(ctx) + case idempotencyrecord.FieldErrorReason: + return m.OldErrorReason(ctx) + case idempotencyrecord.FieldLockedUntil: + return m.OldLockedUntil(ctx) + case idempotencyrecord.FieldExpiresAt: + return m.OldExpiresAt(ctx) + } + return nil, fmt.Errorf("unknown IdempotencyRecord field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *IdempotencyRecordMutation) SetField(name string, value ent.Value) error { + switch name { + case idempotencyrecord.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case idempotencyrecord.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case idempotencyrecord.FieldScope: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetScope(v) + return nil + case idempotencyrecord.FieldIdempotencyKeyHash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIdempotencyKeyHash(v) + return nil + case idempotencyrecord.FieldRequestFingerprint: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetRequestFingerprint(v) + return nil + case idempotencyrecord.FieldStatus: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetStatus(v) + return nil + case idempotencyrecord.FieldResponseStatus: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetResponseStatus(v) + return nil + case idempotencyrecord.FieldResponseBody: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetResponseBody(v) + return nil + case idempotencyrecord.FieldErrorReason: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetErrorReason(v) + return nil + case idempotencyrecord.FieldLockedUntil: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetLockedUntil(v) + return nil + case 
idempotencyrecord.FieldExpiresAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExpiresAt(v) + return nil + } + return fmt.Errorf("unknown IdempotencyRecord field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *IdempotencyRecordMutation) AddedFields() []string { + var fields []string + if m.addresponse_status != nil { + fields = append(fields, idempotencyrecord.FieldResponseStatus) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *IdempotencyRecordMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case idempotencyrecord.FieldResponseStatus: + return m.AddedResponseStatus() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *IdempotencyRecordMutation) AddField(name string, value ent.Value) error { + switch name { + case idempotencyrecord.FieldResponseStatus: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddResponseStatus(v) + return nil + } + return fmt.Errorf("unknown IdempotencyRecord numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. 
+func (m *IdempotencyRecordMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(idempotencyrecord.FieldResponseStatus) { + fields = append(fields, idempotencyrecord.FieldResponseStatus) + } + if m.FieldCleared(idempotencyrecord.FieldResponseBody) { + fields = append(fields, idempotencyrecord.FieldResponseBody) + } + if m.FieldCleared(idempotencyrecord.FieldErrorReason) { + fields = append(fields, idempotencyrecord.FieldErrorReason) + } + if m.FieldCleared(idempotencyrecord.FieldLockedUntil) { + fields = append(fields, idempotencyrecord.FieldLockedUntil) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *IdempotencyRecordMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *IdempotencyRecordMutation) ClearField(name string) error { + switch name { + case idempotencyrecord.FieldResponseStatus: + m.ClearResponseStatus() + return nil + case idempotencyrecord.FieldResponseBody: + m.ClearResponseBody() + return nil + case idempotencyrecord.FieldErrorReason: + m.ClearErrorReason() + return nil + case idempotencyrecord.FieldLockedUntil: + m.ClearLockedUntil() + return nil + } + return fmt.Errorf("unknown IdempotencyRecord nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *IdempotencyRecordMutation) ResetField(name string) error { + switch name { + case idempotencyrecord.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case idempotencyrecord.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case idempotencyrecord.FieldScope: + m.ResetScope() + return nil + case idempotencyrecord.FieldIdempotencyKeyHash: + m.ResetIdempotencyKeyHash() + return nil + case idempotencyrecord.FieldRequestFingerprint: + m.ResetRequestFingerprint() + return nil + case idempotencyrecord.FieldStatus: + m.ResetStatus() + return nil + case idempotencyrecord.FieldResponseStatus: + m.ResetResponseStatus() + return nil + case idempotencyrecord.FieldResponseBody: + m.ResetResponseBody() + return nil + case idempotencyrecord.FieldErrorReason: + m.ResetErrorReason() + return nil + case idempotencyrecord.FieldLockedUntil: + m.ResetLockedUntil() + return nil + case idempotencyrecord.FieldExpiresAt: + m.ResetExpiresAt() + return nil + } + return fmt.Errorf("unknown IdempotencyRecord field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *IdempotencyRecordMutation) AddedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. +func (m *IdempotencyRecordMutation) AddedIDs(name string) []ent.Value { + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *IdempotencyRecordMutation) RemovedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *IdempotencyRecordMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *IdempotencyRecordMutation) ClearedEdges() []string { + edges := make([]string, 0, 0) + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *IdempotencyRecordMutation) EdgeCleared(name string) bool { + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *IdempotencyRecordMutation) ClearEdge(name string) error { + return fmt.Errorf("unknown IdempotencyRecord unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *IdempotencyRecordMutation) ResetEdge(name string) error { + return fmt.Errorf("unknown IdempotencyRecord edge %s", name) +} + // PromoCodeMutation represents an operation that mutates the PromoCode nodes in the graph. type PromoCodeMutation struct { config diff --git a/backend/ent/predicate/predicate.go b/backend/ent/predicate/predicate.go index 584b9606e..89d933fcd 100644 --- a/backend/ent/predicate/predicate.go +++ b/backend/ent/predicate/predicate.go @@ -27,6 +27,9 @@ type ErrorPassthroughRule func(*sql.Selector) // Group is the predicate function for group builders. type Group func(*sql.Selector) +// IdempotencyRecord is the predicate function for idempotencyrecord builders. +type IdempotencyRecord func(*sql.Selector) + // PromoCode is the predicate function for promocode builders. 
type PromoCode func(*sql.Selector) diff --git a/backend/ent/runtime/runtime.go b/backend/ent/runtime/runtime.go index ff3f8f26a..96fd1f8d0 100644 --- a/backend/ent/runtime/runtime.go +++ b/backend/ent/runtime/runtime.go @@ -12,6 +12,7 @@ import ( "github.com/Wei-Shaw/sub2api/ent/apikey" "github.com/Wei-Shaw/sub2api/ent/errorpassthroughrule" "github.com/Wei-Shaw/sub2api/ent/group" + "github.com/Wei-Shaw/sub2api/ent/idempotencyrecord" "github.com/Wei-Shaw/sub2api/ent/promocode" "github.com/Wei-Shaw/sub2api/ent/promocodeusage" "github.com/Wei-Shaw/sub2api/ent/proxy" @@ -209,7 +210,7 @@ func init() { // account.DefaultSchedulable holds the default value on creation for the schedulable field. account.DefaultSchedulable = accountDescSchedulable.Default.(bool) // accountDescSessionWindowStatus is the schema descriptor for session_window_status field. - accountDescSessionWindowStatus := accountFields[21].Descriptor() + accountDescSessionWindowStatus := accountFields[23].Descriptor() // account.SessionWindowStatusValidator is a validator for the "session_window_status" field. It is called by the builders before save. account.SessionWindowStatusValidator = accountDescSessionWindowStatus.Validators[0].(func(string) error) accountgroupFields := schema.AccountGroup{}.Fields() @@ -418,6 +419,41 @@ func init() { groupDescSortOrder := groupFields[25].Descriptor() // group.DefaultSortOrder holds the default value on creation for the sort_order field. group.DefaultSortOrder = groupDescSortOrder.Default.(int) + idempotencyrecordMixin := schema.IdempotencyRecord{}.Mixin() + idempotencyrecordMixinFields0 := idempotencyrecordMixin[0].Fields() + _ = idempotencyrecordMixinFields0 + idempotencyrecordFields := schema.IdempotencyRecord{}.Fields() + _ = idempotencyrecordFields + // idempotencyrecordDescCreatedAt is the schema descriptor for created_at field. 
+ idempotencyrecordDescCreatedAt := idempotencyrecordMixinFields0[0].Descriptor() + // idempotencyrecord.DefaultCreatedAt holds the default value on creation for the created_at field. + idempotencyrecord.DefaultCreatedAt = idempotencyrecordDescCreatedAt.Default.(func() time.Time) + // idempotencyrecordDescUpdatedAt is the schema descriptor for updated_at field. + idempotencyrecordDescUpdatedAt := idempotencyrecordMixinFields0[1].Descriptor() + // idempotencyrecord.DefaultUpdatedAt holds the default value on creation for the updated_at field. + idempotencyrecord.DefaultUpdatedAt = idempotencyrecordDescUpdatedAt.Default.(func() time.Time) + // idempotencyrecord.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + idempotencyrecord.UpdateDefaultUpdatedAt = idempotencyrecordDescUpdatedAt.UpdateDefault.(func() time.Time) + // idempotencyrecordDescScope is the schema descriptor for scope field. + idempotencyrecordDescScope := idempotencyrecordFields[0].Descriptor() + // idempotencyrecord.ScopeValidator is a validator for the "scope" field. It is called by the builders before save. + idempotencyrecord.ScopeValidator = idempotencyrecordDescScope.Validators[0].(func(string) error) + // idempotencyrecordDescIdempotencyKeyHash is the schema descriptor for idempotency_key_hash field. + idempotencyrecordDescIdempotencyKeyHash := idempotencyrecordFields[1].Descriptor() + // idempotencyrecord.IdempotencyKeyHashValidator is a validator for the "idempotency_key_hash" field. It is called by the builders before save. + idempotencyrecord.IdempotencyKeyHashValidator = idempotencyrecordDescIdempotencyKeyHash.Validators[0].(func(string) error) + // idempotencyrecordDescRequestFingerprint is the schema descriptor for request_fingerprint field. + idempotencyrecordDescRequestFingerprint := idempotencyrecordFields[2].Descriptor() + // idempotencyrecord.RequestFingerprintValidator is a validator for the "request_fingerprint" field. 
It is called by the builders before save. + idempotencyrecord.RequestFingerprintValidator = idempotencyrecordDescRequestFingerprint.Validators[0].(func(string) error) + // idempotencyrecordDescStatus is the schema descriptor for status field. + idempotencyrecordDescStatus := idempotencyrecordFields[3].Descriptor() + // idempotencyrecord.StatusValidator is a validator for the "status" field. It is called by the builders before save. + idempotencyrecord.StatusValidator = idempotencyrecordDescStatus.Validators[0].(func(string) error) + // idempotencyrecordDescErrorReason is the schema descriptor for error_reason field. + idempotencyrecordDescErrorReason := idempotencyrecordFields[6].Descriptor() + // idempotencyrecord.ErrorReasonValidator is a validator for the "error_reason" field. It is called by the builders before save. + idempotencyrecord.ErrorReasonValidator = idempotencyrecordDescErrorReason.Validators[0].(func(string) error) promocodeFields := schema.PromoCode{}.Fields() _ = promocodeFields // promocodeDescCode is the schema descriptor for code field. diff --git a/backend/ent/schema/account.go b/backend/ent/schema/account.go index 1cfecc2d5..900d8ee7e 100644 --- a/backend/ent/schema/account.go +++ b/backend/ent/schema/account.go @@ -164,6 +164,19 @@ func (Account) Fields() []ent.Field { Nillable(). SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + // temp_unschedulable_until: 临时不可调度状态解除时间 + // 当命中临时不可调度规则时设置,在此时间前调度器应跳过该账号 + field.Time("temp_unschedulable_until"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "timestamptz"}), + + // temp_unschedulable_reason: 临时不可调度原因,便于排障审计 + field.String("temp_unschedulable_reason"). + Optional(). + Nillable(). + SchemaType(map[string]string{dialect.Postgres: "text"}), + // session_window_*: 会话窗口相关字段 // 用于管理某些需要会话时间窗口的 API(如 Claude Pro) field.Time("session_window_start"). 
@@ -213,6 +226,9 @@ func (Account) Indexes() []ent.Index { index.Fields("rate_limited_at"), // 筛选速率限制账户 index.Fields("rate_limit_reset_at"), // 筛选速率限制解除时间 index.Fields("overload_until"), // 筛选过载账户 + // 调度热路径复合索引(线上由 SQL 迁移创建部分索引,schema 仅用于模型可读性对齐) + index.Fields("platform", "priority"), + index.Fields("priority", "status"), index.Fields("deleted_at"), // 软删除查询优化 } } diff --git a/backend/ent/schema/usage_log.go b/backend/ent/schema/usage_log.go index ffcae840d..dcca1a0ad 100644 --- a/backend/ent/schema/usage_log.go +++ b/backend/ent/schema/usage_log.go @@ -179,5 +179,7 @@ func (UsageLog) Indexes() []ent.Index { // 复合索引用于时间范围查询 index.Fields("user_id", "created_at"), index.Fields("api_key_id", "created_at"), + // 分组维度时间范围查询(线上由 SQL 迁移创建 group_id IS NOT NULL 的部分索引) + index.Fields("group_id", "created_at"), } } diff --git a/backend/ent/schema/user_subscription.go b/backend/ent/schema/user_subscription.go index fa13612b7..a81850b12 100644 --- a/backend/ent/schema/user_subscription.go +++ b/backend/ent/schema/user_subscription.go @@ -108,6 +108,8 @@ func (UserSubscription) Indexes() []ent.Index { index.Fields("group_id"), index.Fields("status"), index.Fields("expires_at"), + // 活跃订阅查询复合索引(线上由 SQL 迁移创建部分索引,schema 仅用于模型可读性对齐) + index.Fields("user_id", "status", "expires_at"), index.Fields("assigned_by"), // 唯一约束通过部分索引实现(WHERE deleted_at IS NULL),支持软删除后重新订阅 // 见迁移文件 016_soft_delete_partial_unique_indexes.sql diff --git a/backend/ent/tx.go b/backend/ent/tx.go index 4fbe9bb4c..cd3b2296c 100644 --- a/backend/ent/tx.go +++ b/backend/ent/tx.go @@ -28,6 +28,8 @@ type Tx struct { ErrorPassthroughRule *ErrorPassthroughRuleClient // Group is the client for interacting with the Group builders. Group *GroupClient + // IdempotencyRecord is the client for interacting with the IdempotencyRecord builders. + IdempotencyRecord *IdempotencyRecordClient // PromoCode is the client for interacting with the PromoCode builders. 
PromoCode *PromoCodeClient // PromoCodeUsage is the client for interacting with the PromoCodeUsage builders. @@ -192,6 +194,7 @@ func (tx *Tx) init() { tx.AnnouncementRead = NewAnnouncementReadClient(tx.config) tx.ErrorPassthroughRule = NewErrorPassthroughRuleClient(tx.config) tx.Group = NewGroupClient(tx.config) + tx.IdempotencyRecord = NewIdempotencyRecordClient(tx.config) tx.PromoCode = NewPromoCodeClient(tx.config) tx.PromoCodeUsage = NewPromoCodeUsageClient(tx.config) tx.Proxy = NewProxyClient(tx.config) diff --git a/backend/go.mod b/backend/go.mod index e281f1499..a68bcddfc 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -90,6 +90,7 @@ require ( github.com/goccy/go-json v0.10.2 // indirect github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-querystring v1.1.0 // indirect + github.com/google/subcommands v1.2.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/hcl/v2 v2.18.1 // indirect @@ -158,6 +159,7 @@ require ( golang.org/x/mod v0.32.0 // indirect golang.org/x/sys v0.41.0 // indirect golang.org/x/text v0.34.0 // indirect + golang.org/x/tools v0.41.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20250929231259-57b25ae835d4 // indirect gopkg.in/ini.v1 v1.67.0 // indirect modernc.org/libc v1.67.6 // indirect diff --git a/backend/go.sum b/backend/go.sum index d8e4d6e27..2c357e5d9 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -56,6 +56,10 @@ github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XL github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams= github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk= +github.com/clipperhouse/stringish v0.1.1 
h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= +github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= +github.com/clipperhouse/uax29/v2 v2.5.0 h1:x7T0T4eTHDONxFJsL94uKNKPHrclyFI0lm7+w94cO8U= +github.com/clipperhouse/uax29/v2 v2.5.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= github.com/coder/websocket v1.8.14 h1:9L0p0iKiNOibykf283eHkKUHHrpG7f65OE3BhhO7v9g= github.com/coder/websocket v1.8.14/go.mod h1:NX3SzP+inril6yawo5CQXx8+fk145lPDC6pumgx0mVg= github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= @@ -140,6 +144,8 @@ github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= +github.com/google/subcommands v1.2.0 h1:vWQspBTo2nEqTUFita5/KeEWlUL8kQObDFbub/EN9oE= +github.com/google/subcommands v1.2.0/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/wire v0.7.0 h1:JxUKI6+CVBgCO2WToKy/nQk0sS+amI9z9EjVmdaocj4= @@ -194,6 +200,8 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.19 h1:v++JhqYnZuu5jSKrk9RbgF5v4CGUjqRfBm05byFGLdw= +github.com/mattn/go-runewidth v0.0.19/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/mattn/go-sqlite3 v1.14.17 
h1:mCRHCLDUBXgpKAqIKsaAaAsrAlbkeomtRFKXh2L6YIM= github.com/mattn/go-sqlite3 v1.14.17/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/mdelapenya/tlscert v0.2.0 h1:7H81W6Z/4weDvZBNOfQte5GpIMo0lGYEeWbkGp5LJHI= @@ -227,6 +235,8 @@ github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/ncruces/go-strftime v1.0.0 h1:HMFp8mLCTPp341M/ZnA4qaf7ZlsbTc+miZjCLOFAw7w= github.com/ncruces/go-strftime v1.0.0/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= @@ -278,6 +288,8 @@ github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ= diff --git a/backend/internal/config/config.go b/backend/internal/config/config.go index c5be017a3..fbe40c5e5 100644 --- a/backend/internal/config/config.go +++ 
b/backend/internal/config/config.go @@ -518,6 +518,12 @@ type GatewayOpenAIWSConfig struct { LBTopK int `mapstructure:"lb_top_k"` // StickySessionTTLSeconds: session_hash -> account_id 粘连 TTL StickySessionTTLSeconds int `mapstructure:"sticky_session_ttl_seconds"` + // SessionHashReadOldFallback: 会话哈希迁移期是否允许“新 key 未命中时回退读旧 SHA-256 key” + SessionHashReadOldFallback bool `mapstructure:"session_hash_read_old_fallback"` + // SessionHashDualWriteOld: 会话哈希迁移期是否双写旧 SHA-256 key(短 TTL) + SessionHashDualWriteOld bool `mapstructure:"session_hash_dual_write_old"` + // MetadataBridgeEnabled: RequestMetadata 迁移期是否保留旧 ctxkey.* 兼容桥接 + MetadataBridgeEnabled bool `mapstructure:"metadata_bridge_enabled"` // StickyResponseIDTTLSeconds: response_id -> account_id 粘连 TTL StickyResponseIDTTLSeconds int `mapstructure:"sticky_response_id_ttl_seconds"` // StickyPreviousResponseTTLSeconds: 兼容旧键(当新键未设置时回退) @@ -1281,6 +1287,9 @@ func setDefaults() { viper.SetDefault("gateway.openai_ws.payload_log_sample_rate", 0.2) viper.SetDefault("gateway.openai_ws.lb_top_k", 3) viper.SetDefault("gateway.openai_ws.sticky_session_ttl_seconds", 3600) + viper.SetDefault("gateway.openai_ws.session_hash_read_old_fallback", true) + viper.SetDefault("gateway.openai_ws.session_hash_dual_write_old", true) + viper.SetDefault("gateway.openai_ws.metadata_bridge_enabled", true) viper.SetDefault("gateway.openai_ws.sticky_response_id_ttl_seconds", 3600) viper.SetDefault("gateway.openai_ws.sticky_previous_response_ttl_seconds", 3600) viper.SetDefault("gateway.openai_ws.scheduler_score_weights.priority", 1.0) diff --git a/backend/internal/config/config_test.go b/backend/internal/config/config_test.go index 76026328c..907796294 100644 --- a/backend/internal/config/config_test.go +++ b/backend/internal/config/config_test.go @@ -105,6 +105,15 @@ func TestLoadDefaultOpenAIWSConfig(t *testing.T) { if cfg.Gateway.OpenAIWS.StickySessionTTLSeconds != 3600 { t.Fatalf("Gateway.OpenAIWS.StickySessionTTLSeconds = %d, want 3600", 
cfg.Gateway.OpenAIWS.StickySessionTTLSeconds) } + if !cfg.Gateway.OpenAIWS.SessionHashReadOldFallback { + t.Fatalf("Gateway.OpenAIWS.SessionHashReadOldFallback = false, want true") + } + if !cfg.Gateway.OpenAIWS.SessionHashDualWriteOld { + t.Fatalf("Gateway.OpenAIWS.SessionHashDualWriteOld = false, want true") + } + if !cfg.Gateway.OpenAIWS.MetadataBridgeEnabled { + t.Fatalf("Gateway.OpenAIWS.MetadataBridgeEnabled = false, want true") + } if cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds != 3600 { t.Fatalf("Gateway.OpenAIWS.StickyResponseIDTTLSeconds = %d, want 3600", cfg.Gateway.OpenAIWS.StickyResponseIDTTLSeconds) } @@ -1313,15 +1322,15 @@ func TestValidateConfig_OpenAIWSRules(t *testing.T) { }, wantErr: "gateway.openai_ws.min_idle_per_account must be <= max_idle_per_account", }, - { - name: "max_idle_per_account 不能大于 max_conns_per_account", - mutate: func(c *Config) { - c.Gateway.OpenAIWS.MaxConnsPerAccount = 2 - c.Gateway.OpenAIWS.MinIdlePerAccount = 1 - c.Gateway.OpenAIWS.MaxIdlePerAccount = 3 - }, - wantErr: "gateway.openai_ws.max_idle_per_account must be <= max_conns_per_account", + { + name: "max_idle_per_account 不能大于 max_conns_per_account", + mutate: func(c *Config) { + c.Gateway.OpenAIWS.MaxConnsPerAccount = 2 + c.Gateway.OpenAIWS.MinIdlePerAccount = 1 + c.Gateway.OpenAIWS.MaxIdlePerAccount = 3 }, + wantErr: "gateway.openai_ws.max_idle_per_account must be <= max_conns_per_account", + }, { name: "dial_timeout_seconds 必须为正数", mutate: func(c *Config) { c.Gateway.OpenAIWS.DialTimeoutSeconds = 0 }, diff --git a/backend/internal/handler/admin/account_handler.go b/backend/internal/handler/admin/account_handler.go index df82476c0..f200de86e 100644 --- a/backend/internal/handler/admin/account_handler.go +++ b/backend/internal/handler/admin/account_handler.go @@ -1336,6 +1336,34 @@ func (h *AccountHandler) GetTodayStats(c *gin.Context) { response.Success(c, stats) } +// BatchTodayStatsRequest 批量今日统计请求体。 +type BatchTodayStatsRequest struct { + AccountIDs []int64 
`json:"account_ids" binding:"required"` +} + +// GetBatchTodayStats 批量获取多个账号的今日统计。 +// POST /api/v1/admin/accounts/today-stats/batch +func (h *AccountHandler) GetBatchTodayStats(c *gin.Context) { + var req BatchTodayStatsRequest + if err := c.ShouldBindJSON(&req); err != nil { + response.BadRequest(c, "Invalid request: "+err.Error()) + return + } + + if len(req.AccountIDs) == 0 { + response.Success(c, gin.H{"stats": map[string]any{}}) + return + } + + stats, err := h.accountUsageService.GetTodayStatsBatch(c.Request.Context(), req.AccountIDs) + if err != nil { + response.ErrorFrom(c, err) + return + } + + response.Success(c, gin.H{"stats": stats}) +} + // SetSchedulableRequest represents the request body for setting schedulable status type SetSchedulableRequest struct { Schedulable bool `json:"schedulable"` diff --git a/backend/internal/handler/failover_loop.go b/backend/internal/handler/failover_loop.go index 1f8a7e9af..b2583301a 100644 --- a/backend/internal/handler/failover_loop.go +++ b/backend/internal/handler/failover_loop.go @@ -2,11 +2,12 @@ package handler import ( "context" - "log" "net/http" "time" + "github.com/Wei-Shaw/sub2api/internal/pkg/logger" "github.com/Wei-Shaw/sub2api/internal/service" + "go.uber.org/zap" ) // TempUnscheduler 用于 HandleFailoverError 中同账号重试耗尽后的临时封禁。 @@ -78,8 +79,12 @@ func (s *FailoverState) HandleFailoverError( // 同账号重试:对 RetryableOnSameAccount 的临时性错误,先在同一账号上重试 if failoverErr.RetryableOnSameAccount && s.SameAccountRetryCount[accountID] < maxSameAccountRetries { s.SameAccountRetryCount[accountID]++ - log.Printf("Account %d: retryable error %d, same-account retry %d/%d", - accountID, failoverErr.StatusCode, s.SameAccountRetryCount[accountID], maxSameAccountRetries) + logger.FromContext(ctx).Warn("gateway.failover_same_account_retry", + zap.Int64("account_id", accountID), + zap.Int("upstream_status", failoverErr.StatusCode), + zap.Int("same_account_retry_count", s.SameAccountRetryCount[accountID]), + 
zap.Int("same_account_retry_max", maxSameAccountRetries), + ) if !sleepWithContext(ctx, sameAccountRetryDelay) { return FailoverCanceled } @@ -101,8 +106,12 @@ func (s *FailoverState) HandleFailoverError( // 递增切换计数 s.SwitchCount++ - log.Printf("Account %d: upstream error %d, switching account %d/%d", - accountID, failoverErr.StatusCode, s.SwitchCount, s.MaxSwitches) + logger.FromContext(ctx).Warn("gateway.failover_switch_account", + zap.Int64("account_id", accountID), + zap.Int("upstream_status", failoverErr.StatusCode), + zap.Int("switch_count", s.SwitchCount), + zap.Int("max_switches", s.MaxSwitches), + ) // Antigravity 平台换号线性递增延时 if platform == service.PlatformAntigravity { @@ -127,13 +136,18 @@ func (s *FailoverState) HandleSelectionExhausted(ctx context.Context) FailoverAc s.LastFailoverErr.StatusCode == http.StatusServiceUnavailable && s.SwitchCount <= s.MaxSwitches { - log.Printf("Antigravity single-account 503 backoff: waiting %v before retry (attempt %d)", - singleAccountBackoffDelay, s.SwitchCount) + logger.FromContext(ctx).Warn("gateway.failover_single_account_backoff", + zap.Duration("backoff_delay", singleAccountBackoffDelay), + zap.Int("switch_count", s.SwitchCount), + zap.Int("max_switches", s.MaxSwitches), + ) if !sleepWithContext(ctx, singleAccountBackoffDelay) { return FailoverCanceled } - log.Printf("Antigravity single-account 503 retry: clearing failed accounts, retry %d/%d", - s.SwitchCount, s.MaxSwitches) + logger.FromContext(ctx).Warn("gateway.failover_single_account_retry", + zap.Int("switch_count", s.SwitchCount), + zap.Int("max_switches", s.MaxSwitches), + ) s.FailedAccountIDs = make(map[int64]struct{}) return FailoverContinue } diff --git a/backend/internal/handler/gateway_handler.go b/backend/internal/handler/gateway_handler.go index 459fb60f6..9262df7eb 100644 --- a/backend/internal/handler/gateway_handler.go +++ b/backend/internal/handler/gateway_handler.go @@ -6,10 +6,10 @@ import ( "encoding/json" "errors" "fmt" - "io" "net/http" 
"strconv" "strings" + "sync/atomic" "time" "github.com/Wei-Shaw/sub2api/internal/config" @@ -18,6 +18,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/pkg/claude" "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" pkgerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" + pkghttputil "github.com/Wei-Shaw/sub2api/internal/pkg/httputil" "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/pkg/logger" "github.com/Wei-Shaw/sub2api/internal/pkg/openai" @@ -28,6 +29,10 @@ import ( "go.uber.org/zap" ) +const gatewayCompatibilityMetricsLogInterval = 1024 + +var gatewayCompatibilityMetricsLogCounter atomic.Uint64 + // GatewayHandler handles API gateway requests type GatewayHandler struct { gatewayService *service.GatewayService @@ -110,9 +115,10 @@ func (h *GatewayHandler) Messages(c *gin.Context) { zap.Int64("api_key_id", apiKey.ID), zap.Any("group_id", apiKey.GroupID), ) + defer h.maybeLogCompatibilityFallbackMetrics(reqLog) // 读取请求体 - body, err := io.ReadAll(c.Request.Body) + body, err := pkghttputil.ReadRequestBodyWithPrealloc(c.Request) if err != nil { if maxErr, ok := extractMaxBytesError(err); ok { h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit)) @@ -141,16 +147,16 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // 设置 max_tokens=1 + haiku 探测请求标识到 context 中 // 必须在 SetClaudeCodeClientContext 之前设置,因为 ClaudeCodeValidator 需要读取此标识进行绕过判断 if isMaxTokensOneHaikuRequest(reqModel, parsedReq.MaxTokens, reqStream) { - ctx := context.WithValue(c.Request.Context(), ctxkey.IsMaxTokensOneHaikuRequest, true) + ctx := service.WithIsMaxTokensOneHaikuRequest(c.Request.Context(), true, h.metadataBridgeEnabled()) c.Request = c.Request.WithContext(ctx) } - // 检查是否为 Claude Code 客户端,设置到 context 中 - SetClaudeCodeClientContext(c, body) + // 检查是否为 Claude Code 客户端,设置到 context 中(复用已解析请求,避免二次反序列化)。 + SetClaudeCodeClientContext(c, body, parsedReq) isClaudeCodeClient := 
service.IsClaudeCodeClient(c.Request.Context()) // 在请求上下文中记录 thinking 状态,供 Antigravity 最终模型 key 推导/模型维度限流使用 - c.Request = c.Request.WithContext(context.WithValue(c.Request.Context(), ctxkey.ThinkingEnabled, parsedReq.ThinkingEnabled)) + c.Request = c.Request.WithContext(service.WithThinkingEnabled(c.Request.Context(), parsedReq.ThinkingEnabled, h.metadataBridgeEnabled())) setOpsRequestContext(c, reqModel, reqStream, body) @@ -248,8 +254,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { if apiKey.GroupID != nil { prefetchedGroupID = *apiKey.GroupID } - ctx := context.WithValue(c.Request.Context(), ctxkey.PrefetchedStickyAccountID, sessionBoundAccountID) - ctx = context.WithValue(ctx, ctxkey.PrefetchedStickyGroupID, prefetchedGroupID) + ctx := service.WithPrefetchedStickySession(c.Request.Context(), sessionBoundAccountID, prefetchedGroupID, h.metadataBridgeEnabled()) c.Request = c.Request.WithContext(ctx) } } @@ -262,7 +267,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // 单账号分组提前设置 SingleAccountRetry 标记,让 Service 层首次 503 就不设模型限流标记。 // 避免单账号分组收到 503 (MODEL_CAPACITY_EXHAUSTED) 时设 29s 限流,导致后续请求连续快速失败。 if h.gatewayService.IsSingleAntigravityAccountGroup(c.Request.Context(), apiKey.GroupID) { - ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) + ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled()) c.Request = c.Request.WithContext(ctx) } @@ -276,7 +281,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { action := fs.HandleSelectionExhausted(c.Request.Context()) switch action { case FailoverContinue: - ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) + ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled()) c.Request = c.Request.WithContext(ctx) continue case FailoverCanceled: @@ -365,7 +370,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { var result *service.ForwardResult requestCtx := c.Request.Context() if 
fs.SwitchCount > 0 { - requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, fs.SwitchCount) + requestCtx = service.WithAccountSwitchCount(requestCtx, fs.SwitchCount, h.metadataBridgeEnabled()) } if account.Platform == service.PlatformAntigravity { result, err = h.antigravityGatewayService.ForwardGemini(requestCtx, c, account, reqModel, "generateContent", reqStream, body, hasBoundSession) @@ -440,7 +445,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { // 单账号分组提前设置 SingleAccountRetry 标记,让 Service 层首次 503 就不设模型限流标记。 // 避免单账号分组收到 503 (MODEL_CAPACITY_EXHAUSTED) 时设 29s 限流,导致后续请求连续快速失败。 if h.gatewayService.IsSingleAntigravityAccountGroup(c.Request.Context(), currentAPIKey.GroupID) { - ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) + ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled()) c.Request = c.Request.WithContext(ctx) } @@ -459,7 +464,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { action := fs.HandleSelectionExhausted(c.Request.Context()) switch action { case FailoverContinue: - ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) + ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled()) c.Request = c.Request.WithContext(ctx) continue case FailoverCanceled: @@ -548,7 +553,7 @@ func (h *GatewayHandler) Messages(c *gin.Context) { var result *service.ForwardResult requestCtx := c.Request.Context() if fs.SwitchCount > 0 { - requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, fs.SwitchCount) + requestCtx = service.WithAccountSwitchCount(requestCtx, fs.SwitchCount, h.metadataBridgeEnabled()) } if account.Platform == service.PlatformAntigravity && account.Type != service.AccountTypeAPIKey { result, err = h.antigravityGatewayService.Forward(requestCtx, c, account, body, hasBoundSession) @@ -1013,9 +1018,10 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) { zap.Int64("api_key_id", 
apiKey.ID), zap.Any("group_id", apiKey.GroupID), ) + defer h.maybeLogCompatibilityFallbackMetrics(reqLog) // 读取请求体 - body, err := io.ReadAll(c.Request.Body) + body, err := pkghttputil.ReadRequestBodyWithPrealloc(c.Request) if err != nil { if maxErr, ok := extractMaxBytesError(err); ok { h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit)) @@ -1030,9 +1036,6 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) { return } - // 检查是否为 Claude Code 客户端,设置到 context 中 - SetClaudeCodeClientContext(c, body) - setOpsRequestContext(c, "", false, body) parsedReq, err := service.ParseGatewayRequest(body, domain.PlatformAnthropic) @@ -1040,9 +1043,11 @@ func (h *GatewayHandler) CountTokens(c *gin.Context) { h.errorResponse(c, http.StatusBadRequest, "invalid_request_error", "Failed to parse request body") return } + // count_tokens 走 messages 严格校验时,复用已解析请求,避免二次反序列化。 + SetClaudeCodeClientContext(c, body, parsedReq) reqLog = reqLog.With(zap.String("model", parsedReq.Model), zap.Bool("stream", parsedReq.Stream)) // 在请求上下文中记录 thinking 状态,供 Antigravity 最终模型 key 推导/模型维度限流使用 - c.Request = c.Request.WithContext(context.WithValue(c.Request.Context(), ctxkey.ThinkingEnabled, parsedReq.ThinkingEnabled)) + c.Request = c.Request.WithContext(service.WithThinkingEnabled(c.Request.Context(), parsedReq.ThinkingEnabled, h.metadataBridgeEnabled())) // 验证 model 必填 if parsedReq.Model == "" { @@ -1320,6 +1325,30 @@ func billingErrorDetails(err error) (status int, code, message string) { return http.StatusForbidden, "billing_error", msg } +func (h *GatewayHandler) metadataBridgeEnabled() bool { + if h == nil || h.cfg == nil { + return true + } + return h.cfg.Gateway.OpenAIWS.MetadataBridgeEnabled +} + +func (h *GatewayHandler) maybeLogCompatibilityFallbackMetrics(reqLog *zap.Logger) { + if reqLog == nil { + return + } + if gatewayCompatibilityMetricsLogCounter.Add(1)%gatewayCompatibilityMetricsLogInterval != 0 { + return + } + metrics 
:= service.SnapshotOpenAICompatibilityFallbackMetrics() + reqLog.Info("gateway.compatibility_fallback_metrics", + zap.Int64("session_hash_legacy_read_fallback_total", metrics.SessionHashLegacyReadFallbackTotal), + zap.Int64("session_hash_legacy_read_fallback_hit", metrics.SessionHashLegacyReadFallbackHit), + zap.Int64("session_hash_legacy_dual_write_total", metrics.SessionHashLegacyDualWriteTotal), + zap.Float64("session_hash_legacy_read_hit_rate", metrics.SessionHashLegacyReadHitRate), + zap.Int64("metadata_legacy_fallback_total", metrics.MetadataLegacyFallbackTotal), + ) +} + func (h *GatewayHandler) submitUsageRecordTask(task service.UsageRecordTask) { if task == nil { return diff --git a/backend/internal/handler/gateway_helper.go b/backend/internal/handler/gateway_helper.go index efff7997f..ea8a5f1a9 100644 --- a/backend/internal/handler/gateway_helper.go +++ b/backend/internal/handler/gateway_helper.go @@ -18,12 +18,17 @@ import ( // claudeCodeValidator is a singleton validator for Claude Code client detection var claudeCodeValidator = service.NewClaudeCodeValidator() +const claudeCodeParsedRequestContextKey = "claude_code_parsed_request" + // SetClaudeCodeClientContext 检查请求是否来自 Claude Code 客户端,并设置到 context 中 // 返回更新后的 context -func SetClaudeCodeClientContext(c *gin.Context, body []byte) { +func SetClaudeCodeClientContext(c *gin.Context, body []byte, parsedReq *service.ParsedRequest) { if c == nil || c.Request == nil { return } + if parsedReq != nil { + c.Set(claudeCodeParsedRequestContextKey, parsedReq) + } // Fast path:非 Claude CLI UA 直接判定 false,避免热路径二次 JSON 反序列化。 if !claudeCodeValidator.ValidateUserAgent(c.GetHeader("User-Agent")) { ctx := service.SetClaudeCodeClient(c.Request.Context(), false) @@ -37,8 +42,11 @@ func SetClaudeCodeClientContext(c *gin.Context, body []byte) { isClaudeCode = true } else { // 仅在确认为 Claude CLI 且 messages 路径时再做 body 解析。 - var bodyMap map[string]any - if len(body) > 0 { + bodyMap := claudeCodeBodyMapFromParsedRequest(parsedReq) + 
if bodyMap == nil { + bodyMap = claudeCodeBodyMapFromContextCache(c) + } + if bodyMap == nil && len(body) > 0 { _ = json.Unmarshal(body, &bodyMap) } isClaudeCode = claudeCodeValidator.Validate(c.Request, bodyMap) @@ -49,6 +57,42 @@ func SetClaudeCodeClientContext(c *gin.Context, body []byte) { c.Request = c.Request.WithContext(ctx) } +func claudeCodeBodyMapFromParsedRequest(parsedReq *service.ParsedRequest) map[string]any { + if parsedReq == nil { + return nil + } + bodyMap := map[string]any{ + "model": parsedReq.Model, + } + if parsedReq.System != nil || parsedReq.HasSystem { + bodyMap["system"] = parsedReq.System + } + if parsedReq.MetadataUserID != "" { + bodyMap["metadata"] = map[string]any{"user_id": parsedReq.MetadataUserID} + } + return bodyMap +} + +func claudeCodeBodyMapFromContextCache(c *gin.Context) map[string]any { + if c == nil { + return nil + } + if cached, ok := c.Get(service.OpenAIParsedRequestBodyKey); ok { + if bodyMap, ok := cached.(map[string]any); ok { + return bodyMap + } + } + if cached, ok := c.Get(claudeCodeParsedRequestContextKey); ok { + switch v := cached.(type) { + case *service.ParsedRequest: + return claudeCodeBodyMapFromParsedRequest(v) + case service.ParsedRequest: + return claudeCodeBodyMapFromParsedRequest(&v) + } + } + return nil +} + // 并发槽位等待相关常量 // // 性能优化说明: diff --git a/backend/internal/handler/gateway_helper_fastpath_test.go b/backend/internal/handler/gateway_helper_fastpath_test.go index 3e6c376b2..31d489f08 100644 --- a/backend/internal/handler/gateway_helper_fastpath_test.go +++ b/backend/internal/handler/gateway_helper_fastpath_test.go @@ -33,6 +33,14 @@ func (m *concurrencyCacheMock) GetAccountConcurrency(ctx context.Context, accoun return 0, nil } +func (m *concurrencyCacheMock) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) { + result := make(map[int64]int, len(accountIDs)) + for _, accountID := range accountIDs { + result[accountID] = 0 + } + return result, nil +} + func 
(m *concurrencyCacheMock) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { return true, nil } diff --git a/backend/internal/handler/gateway_helper_hotpath_test.go b/backend/internal/handler/gateway_helper_hotpath_test.go index 3fdf1bfcc..f8f7eaca2 100644 --- a/backend/internal/handler/gateway_helper_hotpath_test.go +++ b/backend/internal/handler/gateway_helper_hotpath_test.go @@ -49,6 +49,14 @@ func (s *helperConcurrencyCacheStub) GetAccountConcurrency(ctx context.Context, return 0, nil } +func (s *helperConcurrencyCacheStub) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) { + out := make(map[int64]int, len(accountIDs)) + for _, accountID := range accountIDs { + out[accountID] = 0 + } + return out, nil +} + func (s *helperConcurrencyCacheStub) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { return true, nil } @@ -133,7 +141,7 @@ func TestSetClaudeCodeClientContext_FastPathAndStrictPath(t *testing.T) { c, _ := newHelperTestContext(http.MethodPost, "/v1/messages") c.Request.Header.Set("User-Agent", "curl/8.6.0") - SetClaudeCodeClientContext(c, validClaudeCodeBodyJSON()) + SetClaudeCodeClientContext(c, validClaudeCodeBodyJSON(), nil) require.False(t, service.IsClaudeCodeClient(c.Request.Context())) }) @@ -141,7 +149,7 @@ func TestSetClaudeCodeClientContext_FastPathAndStrictPath(t *testing.T) { c, _ := newHelperTestContext(http.MethodGet, "/v1/models") c.Request.Header.Set("User-Agent", "claude-cli/1.0.1") - SetClaudeCodeClientContext(c, nil) + SetClaudeCodeClientContext(c, nil, nil) require.True(t, service.IsClaudeCodeClient(c.Request.Context())) }) @@ -152,7 +160,7 @@ func TestSetClaudeCodeClientContext_FastPathAndStrictPath(t *testing.T) { c.Request.Header.Set("anthropic-beta", "message-batches-2024-09-24") c.Request.Header.Set("anthropic-version", "2023-06-01") - SetClaudeCodeClientContext(c, validClaudeCodeBodyJSON()) + 
SetClaudeCodeClientContext(c, validClaudeCodeBodyJSON(), nil) require.True(t, service.IsClaudeCodeClient(c.Request.Context())) }) @@ -160,11 +168,51 @@ func TestSetClaudeCodeClientContext_FastPathAndStrictPath(t *testing.T) { c, _ := newHelperTestContext(http.MethodPost, "/v1/messages") c.Request.Header.Set("User-Agent", "claude-cli/1.0.1") // 缺少严格校验所需 header + body 字段 - SetClaudeCodeClientContext(c, []byte(`{"model":"x"}`)) + SetClaudeCodeClientContext(c, []byte(`{"model":"x"}`), nil) require.False(t, service.IsClaudeCodeClient(c.Request.Context())) }) } +func TestSetClaudeCodeClientContext_ReuseParsedRequestAndContextCache(t *testing.T) { + t.Run("reuse parsed request without body unmarshal", func(t *testing.T) { + c, _ := newHelperTestContext(http.MethodPost, "/v1/messages") + c.Request.Header.Set("User-Agent", "claude-cli/1.0.1") + c.Request.Header.Set("X-App", "claude-code") + c.Request.Header.Set("anthropic-beta", "message-batches-2024-09-24") + c.Request.Header.Set("anthropic-version", "2023-06-01") + + parsedReq := &service.ParsedRequest{ + Model: "claude-3-5-sonnet-20241022", + System: []any{ + map[string]any{"text": "You are Claude Code, Anthropic's official CLI for Claude."}, + }, + MetadataUserID: "user_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_account__session_abc-123", + } + + // body 非法 JSON,如果函数复用 parsedReq 成功则仍应判定为 Claude Code。 + SetClaudeCodeClientContext(c, []byte(`{invalid`), parsedReq) + require.True(t, service.IsClaudeCodeClient(c.Request.Context())) + }) + + t.Run("reuse context cache without body unmarshal", func(t *testing.T) { + c, _ := newHelperTestContext(http.MethodPost, "/v1/messages") + c.Request.Header.Set("User-Agent", "claude-cli/1.0.1") + c.Request.Header.Set("X-App", "claude-code") + c.Request.Header.Set("anthropic-beta", "message-batches-2024-09-24") + c.Request.Header.Set("anthropic-version", "2023-06-01") + c.Set(service.OpenAIParsedRequestBodyKey, map[string]any{ + "model": "claude-3-5-sonnet-20241022", 
+ "system": []any{ + map[string]any{"text": "You are Claude Code, Anthropic's official CLI for Claude."}, + }, + "metadata": map[string]any{"user_id": "user_aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa_account__session_abc-123"}, + }) + + SetClaudeCodeClientContext(c, []byte(`{invalid`), nil) + require.True(t, service.IsClaudeCodeClient(c.Request.Context())) + }) +} + func TestWaitForSlotWithPingTimeout_AccountAndUserAcquire(t *testing.T) { cache := &helperConcurrencyCacheStub{ accountSeq: []bool{false, true}, diff --git a/backend/internal/handler/gemini_v1beta_handler.go b/backend/internal/handler/gemini_v1beta_handler.go index 2da0570be..50af9c8f2 100644 --- a/backend/internal/handler/gemini_v1beta_handler.go +++ b/backend/internal/handler/gemini_v1beta_handler.go @@ -7,16 +7,15 @@ import ( "encoding/hex" "encoding/json" "errors" - "io" "net/http" "regexp" "strings" "github.com/Wei-Shaw/sub2api/internal/domain" "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" - "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" "github.com/Wei-Shaw/sub2api/internal/pkg/gemini" "github.com/Wei-Shaw/sub2api/internal/pkg/googleapi" + pkghttputil "github.com/Wei-Shaw/sub2api/internal/pkg/httputil" "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/pkg/logger" "github.com/Wei-Shaw/sub2api/internal/server/middleware" @@ -168,7 +167,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { stream := action == "streamGenerateContent" reqLog = reqLog.With(zap.String("model", modelName), zap.String("action", action), zap.Bool("stream", stream)) - body, err := io.ReadAll(c.Request.Body) + body, err := pkghttputil.ReadRequestBodyWithPrealloc(c.Request) if err != nil { if maxErr, ok := extractMaxBytesError(err); ok { googleError(c, http.StatusRequestEntityTooLarge, buildBodyTooLargeMessage(maxErr.Limit)) @@ -268,8 +267,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { if apiKey.GroupID != nil { 
prefetchedGroupID = *apiKey.GroupID } - ctx := context.WithValue(c.Request.Context(), ctxkey.PrefetchedStickyAccountID, sessionBoundAccountID) - ctx = context.WithValue(ctx, ctxkey.PrefetchedStickyGroupID, prefetchedGroupID) + ctx := service.WithPrefetchedStickySession(c.Request.Context(), sessionBoundAccountID, prefetchedGroupID, h.metadataBridgeEnabled()) c.Request = c.Request.WithContext(ctx) } } @@ -349,7 +347,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { // 单账号分组提前设置 SingleAccountRetry 标记,让 Service 层首次 503 就不设模型限流标记。 // 避免单账号分组收到 503 (MODEL_CAPACITY_EXHAUSTED) 时设 29s 限流,导致后续请求连续快速失败。 if h.gatewayService.IsSingleAntigravityAccountGroup(c.Request.Context(), apiKey.GroupID) { - ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) + ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled()) c.Request = c.Request.WithContext(ctx) } @@ -363,7 +361,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { action := fs.HandleSelectionExhausted(c.Request.Context()) switch action { case FailoverContinue: - ctx := context.WithValue(c.Request.Context(), ctxkey.SingleAccountRetry, true) + ctx := service.WithSingleAccountRetry(c.Request.Context(), true, h.metadataBridgeEnabled()) c.Request = c.Request.WithContext(ctx) continue case FailoverCanceled: @@ -456,7 +454,7 @@ func (h *GatewayHandler) GeminiV1BetaModels(c *gin.Context) { var result *service.ForwardResult requestCtx := c.Request.Context() if fs.SwitchCount > 0 { - requestCtx = context.WithValue(requestCtx, ctxkey.AccountSwitchCount, fs.SwitchCount) + requestCtx = service.WithAccountSwitchCount(requestCtx, fs.SwitchCount, h.metadataBridgeEnabled()) } if account.Platform == service.PlatformAntigravity && account.Type != service.AccountTypeAPIKey { result, err = h.antigravityGatewayService.ForwardGemini(requestCtx, c, account, modelName, action, stream, body, hasBoundSession) diff --git 
a/backend/internal/handler/openai_gateway_handler.go b/backend/internal/handler/openai_gateway_handler.go index 6caa92721..44f4889d6 100644 --- a/backend/internal/handler/openai_gateway_handler.go +++ b/backend/internal/handler/openai_gateway_handler.go @@ -1,12 +1,10 @@ package handler import ( - "bytes" "context" "encoding/json" "errors" "fmt" - "io" "net/http" "runtime/debug" "strconv" @@ -14,6 +12,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/config" + pkghttputil "github.com/Wei-Shaw/sub2api/internal/pkg/httputil" "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/pkg/logger" middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" @@ -36,34 +35,6 @@ type OpenAIGatewayHandler struct { maxAccountSwitches int } -const ( - openAIRequestBodyReadInitCap = 512 - openAIRequestBodyReadMaxInitCap = 1 << 20 -) - -func readRequestBodyWithPrealloc(req *http.Request) ([]byte, error) { - if req == nil || req.Body == nil { - return nil, nil - } - capHint := openAIRequestBodyReadInitCap - if req.ContentLength > 0 { - switch { - case req.ContentLength < int64(openAIRequestBodyReadInitCap): - capHint = openAIRequestBodyReadInitCap - case req.ContentLength > int64(openAIRequestBodyReadMaxInitCap): - capHint = openAIRequestBodyReadMaxInitCap - default: - capHint = int(req.ContentLength) - } - } - - buf := bytes.NewBuffer(make([]byte, 0, capHint)) - if _, err := io.Copy(buf, req.Body); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - // NewOpenAIGatewayHandler creates a new OpenAIGatewayHandler func NewOpenAIGatewayHandler( gatewayService *service.OpenAIGatewayService, @@ -126,7 +97,7 @@ func (h *OpenAIGatewayHandler) Responses(c *gin.Context) { } // Read request body - body, err := readRequestBodyWithPrealloc(c.Request) + body, err := pkghttputil.ReadRequestBodyWithPrealloc(c.Request) if err != nil { if maxErr, ok := extractMaxBytesError(err); ok { h.errorResponse(c, http.StatusRequestEntityTooLarge, 
"invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit)) diff --git a/backend/internal/handler/openai_gateway_handler_test.go b/backend/internal/handler/openai_gateway_handler_test.go index 71ce29875..689cc4d89 100644 --- a/backend/internal/handler/openai_gateway_handler_test.go +++ b/backend/internal/handler/openai_gateway_handler_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + pkghttputil "github.com/Wei-Shaw/sub2api/internal/pkg/httputil" "github.com/Wei-Shaw/sub2api/internal/server/middleware" "github.com/Wei-Shaw/sub2api/internal/service" "github.com/gin-gonic/gin" @@ -112,7 +113,7 @@ func TestReadRequestBodyWithPrealloc(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(payload)) req.ContentLength = int64(len(payload)) - body, err := readRequestBodyWithPrealloc(req) + body, err := pkghttputil.ReadRequestBodyWithPrealloc(req) require.NoError(t, err) require.Equal(t, payload, string(body)) } @@ -122,7 +123,7 @@ func TestReadRequestBodyWithPrealloc_MaxBytesError(t *testing.T) { req := httptest.NewRequest(http.MethodPost, "/v1/responses", strings.NewReader(strings.Repeat("x", 8))) req.Body = http.MaxBytesReader(rec, req.Body, 4) - _, err := readRequestBodyWithPrealloc(req) + _, err := pkghttputil.ReadRequestBodyWithPrealloc(req) require.Error(t, err) var maxErr *http.MaxBytesError require.ErrorAs(t, err, &maxErr) diff --git a/backend/internal/handler/ops_error_logger.go b/backend/internal/handler/ops_error_logger.go index ab9a21674..a1ec3569e 100644 --- a/backend/internal/handler/ops_error_logger.go +++ b/backend/internal/handler/ops_error_logger.go @@ -311,6 +311,35 @@ type opsCaptureWriter struct { buf bytes.Buffer } +const opsCaptureWriterLimit = 64 * 1024 + +var opsCaptureWriterPool = sync.Pool{ + New: func() any { + return &opsCaptureWriter{limit: opsCaptureWriterLimit} + }, +} + +func acquireOpsCaptureWriter(rw gin.ResponseWriter) *opsCaptureWriter { + w, ok := 
opsCaptureWriterPool.Get().(*opsCaptureWriter) + if !ok || w == nil { + w = &opsCaptureWriter{} + } + w.ResponseWriter = rw + w.limit = opsCaptureWriterLimit + w.buf.Reset() + return w +} + +func releaseOpsCaptureWriter(w *opsCaptureWriter) { + if w == nil { + return + } + w.ResponseWriter = nil + w.limit = opsCaptureWriterLimit + w.buf.Reset() + opsCaptureWriterPool.Put(w) +} + func (w *opsCaptureWriter) Write(b []byte) (int, error) { if w.Status() >= 400 && w.limit > 0 && w.buf.Len() < w.limit { remaining := w.limit - w.buf.Len() @@ -342,7 +371,8 @@ func (w *opsCaptureWriter) WriteString(s string) (int, error) { // - Streaming errors after the response has started (SSE) may still need explicit logging. func OpsErrorLoggerMiddleware(ops *service.OpsService) gin.HandlerFunc { return func(c *gin.Context) { - w := &opsCaptureWriter{ResponseWriter: c.Writer, limit: 64 * 1024} + w := acquireOpsCaptureWriter(c.Writer) + defer releaseOpsCaptureWriter(w) c.Writer = w c.Next() diff --git a/backend/internal/handler/ops_error_logger_test.go b/backend/internal/handler/ops_error_logger_test.go index a11fa1f2e..fe7466923 100644 --- a/backend/internal/handler/ops_error_logger_test.go +++ b/backend/internal/handler/ops_error_logger_test.go @@ -173,3 +173,23 @@ func TestEnqueueOpsErrorLog_EarlyReturnBranches(t *testing.T) { enqueueOpsErrorLog(ops, entry) require.Equal(t, int64(0), OpsErrorLogEnqueuedTotal()) } + +func TestOpsCaptureWriterPool_ResetOnRelease(t *testing.T) { + gin.SetMode(gin.TestMode) + + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodGet, "/test", nil) + + writer := acquireOpsCaptureWriter(c.Writer) + require.NotNil(t, writer) + _, err := writer.buf.WriteString("temp-error-body") + require.NoError(t, err) + + releaseOpsCaptureWriter(writer) + + reused := acquireOpsCaptureWriter(c.Writer) + defer releaseOpsCaptureWriter(reused) + + require.Zero(t, reused.buf.Len(), "writer should be reset before 
reuse") +} diff --git a/backend/internal/handler/sora_gateway_handler.go b/backend/internal/handler/sora_gateway_handler.go index ccec0a812..48c1e451b 100644 --- a/backend/internal/handler/sora_gateway_handler.go +++ b/backend/internal/handler/sora_gateway_handler.go @@ -7,7 +7,6 @@ import ( "encoding/json" "errors" "fmt" - "io" "net/http" "os" "path" @@ -17,6 +16,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/config" + pkghttputil "github.com/Wei-Shaw/sub2api/internal/pkg/httputil" "github.com/Wei-Shaw/sub2api/internal/pkg/ip" "github.com/Wei-Shaw/sub2api/internal/pkg/logger" middleware2 "github.com/Wei-Shaw/sub2api/internal/server/middleware" @@ -107,7 +107,7 @@ func (h *SoraGatewayHandler) ChatCompletions(c *gin.Context) { zap.Any("group_id", apiKey.GroupID), ) - body, err := io.ReadAll(c.Request.Body) + body, err := pkghttputil.ReadRequestBodyWithPrealloc(c.Request) if err != nil { if maxErr, ok := extractMaxBytesError(err); ok { h.errorResponse(c, http.StatusRequestEntityTooLarge, "invalid_request_error", buildBodyTooLargeMessage(maxErr.Limit)) diff --git a/backend/internal/pkg/errors/errors_test.go b/backend/internal/pkg/errors/errors_test.go index 1a1c842ee..25e629073 100644 --- a/backend/internal/pkg/errors/errors_test.go +++ b/backend/internal/pkg/errors/errors_test.go @@ -166,3 +166,18 @@ func TestToHTTP(t *testing.T) { }) } } + +func TestToHTTP_MetadataDeepCopy(t *testing.T) { + md := map[string]string{"k": "v"} + appErr := BadRequest("BAD_REQUEST", "invalid").WithMetadata(md) + + code, body := ToHTTP(appErr) + require.Equal(t, http.StatusBadRequest, code) + require.Equal(t, "v", body.Metadata["k"]) + + md["k"] = "changed" + require.Equal(t, "v", body.Metadata["k"]) + + appErr.Metadata["k"] = "changed-again" + require.Equal(t, "v", body.Metadata["k"]) +} diff --git a/backend/internal/pkg/errors/http.go b/backend/internal/pkg/errors/http.go index 7b5560e37..420c69a3b 100644 --- a/backend/internal/pkg/errors/http.go +++ 
b/backend/internal/pkg/errors/http.go @@ -16,6 +16,16 @@ func ToHTTP(err error) (statusCode int, body Status) { return http.StatusOK, Status{Code: int32(http.StatusOK)} } - cloned := Clone(appErr) - return int(cloned.Code), cloned.Status + body = Status{ + Code: appErr.Code, + Reason: appErr.Reason, + Message: appErr.Message, + } + if appErr.Metadata != nil { + body.Metadata = make(map[string]string, len(appErr.Metadata)) + for k, v := range appErr.Metadata { + body.Metadata[k] = v + } + } + return int(appErr.Code), body } diff --git a/backend/internal/pkg/httpclient/pool.go b/backend/internal/pkg/httpclient/pool.go index 76b7aa915..6ef3d7141 100644 --- a/backend/internal/pkg/httpclient/pool.go +++ b/backend/internal/pkg/httpclient/pool.go @@ -32,6 +32,7 @@ const ( defaultMaxIdleConns = 100 // 最大空闲连接数 defaultMaxIdleConnsPerHost = 10 // 每个主机最大空闲连接数 defaultIdleConnTimeout = 90 * time.Second // 空闲连接超时时间(建议小于上游 LB 超时) + validatedHostTTL = 30 * time.Second // DNS Rebinding 校验缓存 TTL ) // Options 定义共享 HTTP 客户端的构建参数 @@ -53,6 +54,9 @@ type Options struct { // sharedClients 存储按配置参数缓存的 http.Client 实例 var sharedClients sync.Map +// 允许测试替换校验函数,生产默认指向真实实现。 +var validateResolvedIP = urlvalidator.ValidateResolvedIP + // GetClient 返回共享的 HTTP 客户端实例 // 性能优化:相同配置复用同一客户端,避免重复创建 Transport // 安全说明:代理配置失败时直接返回错误,不会回退到直连,避免 IP 关联风险 @@ -84,7 +88,7 @@ func buildClient(opts Options) (*http.Client, error) { var rt http.RoundTripper = transport if opts.ValidateResolvedIP && !opts.AllowPrivateHosts { - rt = &validatedTransport{base: transport} + rt = newValidatedTransport(transport) } return &http.Client{ Transport: rt, @@ -149,17 +153,56 @@ func buildClientKey(opts Options) string { } type validatedTransport struct { - base http.RoundTripper + base http.RoundTripper + validatedHosts sync.Map // map[string]time.Time, value 为过期时间 + now func() time.Time +} + +func newValidatedTransport(base http.RoundTripper) *validatedTransport { + return &validatedTransport{ + base: base, + now: time.Now, + } +} 
+ +func (t *validatedTransport) isValidatedHost(host string, now time.Time) bool { + if t == nil { + return false + } + raw, ok := t.validatedHosts.Load(host) + if !ok { + return false + } + expireAt, ok := raw.(time.Time) + if !ok { + t.validatedHosts.Delete(host) + return false + } + if now.Before(expireAt) { + return true + } + t.validatedHosts.Delete(host) + return false } func (t *validatedTransport) RoundTrip(req *http.Request) (*http.Response, error) { if req != nil && req.URL != nil { - host := strings.TrimSpace(req.URL.Hostname()) + host := strings.ToLower(strings.TrimSpace(req.URL.Hostname())) if host != "" { - if err := urlvalidator.ValidateResolvedIP(host); err != nil { - return nil, err + now := time.Now() + if t != nil && t.now != nil { + now = t.now() + } + if !t.isValidatedHost(host, now) { + if err := validateResolvedIP(host); err != nil { + return nil, err + } + t.validatedHosts.Store(host, now.Add(validatedHostTTL)) } } } + if t == nil || t.base == nil { + return nil, fmt.Errorf("validated transport base is nil") + } return t.base.RoundTrip(req) } diff --git a/backend/internal/pkg/httpclient/pool_test.go b/backend/internal/pkg/httpclient/pool_test.go new file mode 100644 index 000000000..f945758a9 --- /dev/null +++ b/backend/internal/pkg/httpclient/pool_test.go @@ -0,0 +1,115 @@ +package httpclient + +import ( + "errors" + "io" + "net/http" + "strings" + "sync/atomic" + "testing" + "time" + + "github.com/stretchr/testify/require" +) + +type roundTripFunc func(*http.Request) (*http.Response, error) + +func (f roundTripFunc) RoundTrip(req *http.Request) (*http.Response, error) { + return f(req) +} + +func TestValidatedTransport_CacheHostValidation(t *testing.T) { + originalValidate := validateResolvedIP + defer func() { validateResolvedIP = originalValidate }() + + var validateCalls int32 + validateResolvedIP = func(host string) error { + atomic.AddInt32(&validateCalls, 1) + require.Equal(t, "api.openai.com", host) + return nil + } + + var 
baseCalls int32 + base := roundTripFunc(func(_ *http.Request) (*http.Response, error) { + atomic.AddInt32(&baseCalls, 1) + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{}`)), + Header: make(http.Header), + }, nil + }) + + now := time.Unix(1730000000, 0) + transport := newValidatedTransport(base) + transport.now = func() time.Time { return now } + + req, err := http.NewRequest(http.MethodGet, "https://api.openai.com/v1/responses", nil) + require.NoError(t, err) + + _, err = transport.RoundTrip(req) + require.NoError(t, err) + _, err = transport.RoundTrip(req) + require.NoError(t, err) + + require.Equal(t, int32(1), atomic.LoadInt32(&validateCalls)) + require.Equal(t, int32(2), atomic.LoadInt32(&baseCalls)) +} + +func TestValidatedTransport_ExpiredCacheTriggersRevalidation(t *testing.T) { + originalValidate := validateResolvedIP + defer func() { validateResolvedIP = originalValidate }() + + var validateCalls int32 + validateResolvedIP = func(_ string) error { + atomic.AddInt32(&validateCalls, 1) + return nil + } + + base := roundTripFunc(func(_ *http.Request) (*http.Response, error) { + return &http.Response{ + StatusCode: http.StatusOK, + Body: io.NopCloser(strings.NewReader(`{}`)), + Header: make(http.Header), + }, nil + }) + + now := time.Unix(1730001000, 0) + transport := newValidatedTransport(base) + transport.now = func() time.Time { return now } + + req, err := http.NewRequest(http.MethodGet, "https://api.openai.com/v1/responses", nil) + require.NoError(t, err) + + _, err = transport.RoundTrip(req) + require.NoError(t, err) + + now = now.Add(validatedHostTTL + time.Second) + _, err = transport.RoundTrip(req) + require.NoError(t, err) + + require.Equal(t, int32(2), atomic.LoadInt32(&validateCalls)) +} + +func TestValidatedTransport_ValidationErrorStopsRoundTrip(t *testing.T) { + originalValidate := validateResolvedIP + defer func() { validateResolvedIP = originalValidate }() + + expectedErr := errors.New("dns 
rebinding rejected") + validateResolvedIP = func(_ string) error { + return expectedErr + } + + var baseCalls int32 + base := roundTripFunc(func(_ *http.Request) (*http.Response, error) { + atomic.AddInt32(&baseCalls, 1) + return &http.Response{StatusCode: http.StatusOK, Body: io.NopCloser(strings.NewReader(`{}`))}, nil + }) + + transport := newValidatedTransport(base) + req, err := http.NewRequest(http.MethodGet, "https://api.openai.com/v1/responses", nil) + require.NoError(t, err) + + _, err = transport.RoundTrip(req) + require.ErrorIs(t, err, expectedErr) + require.Equal(t, int32(0), atomic.LoadInt32(&baseCalls)) +} diff --git a/backend/internal/pkg/httputil/body.go b/backend/internal/pkg/httputil/body.go new file mode 100644 index 000000000..69e99dc53 --- /dev/null +++ b/backend/internal/pkg/httputil/body.go @@ -0,0 +1,37 @@ +package httputil + +import ( + "bytes" + "io" + "net/http" +) + +const ( + requestBodyReadInitCap = 512 + requestBodyReadMaxInitCap = 1 << 20 +) + +// ReadRequestBodyWithPrealloc reads request body with preallocated buffer based on content length. 
+func ReadRequestBodyWithPrealloc(req *http.Request) ([]byte, error) { + if req == nil || req.Body == nil { + return nil, nil + } + + capHint := requestBodyReadInitCap + if req.ContentLength > 0 { + switch { + case req.ContentLength < int64(requestBodyReadInitCap): + capHint = requestBodyReadInitCap + case req.ContentLength > int64(requestBodyReadMaxInitCap): + capHint = requestBodyReadMaxInitCap + default: + capHint = int(req.ContentLength) + } + } + + buf := bytes.NewBuffer(make([]byte, 0, capHint)) + if _, err := io.Copy(buf, req.Body); err != nil { + return nil, err + } + return buf.Bytes(), nil +} diff --git a/backend/internal/pkg/ip/ip.go b/backend/internal/pkg/ip/ip.go index 3f05ac41a..f6f77c86e 100644 --- a/backend/internal/pkg/ip/ip.go +++ b/backend/internal/pkg/ip/ip.go @@ -67,6 +67,14 @@ func normalizeIP(ip string) string { // privateNets 预编译私有 IP CIDR 块,避免每次调用 isPrivateIP 时重复解析 var privateNets []*net.IPNet +// CompiledIPRules 表示预编译的 IP 匹配规则。 +// PatternCount 记录原始规则数量,用于保留“规则存在但全无效”时的行为语义。 +type CompiledIPRules struct { + CIDRs []*net.IPNet + IPs []net.IP + PatternCount int +} + func init() { for _, cidr := range []string{ "10.0.0.0/8", @@ -84,6 +92,53 @@ func init() { } } +// CompileIPRules 将 IP/CIDR 字符串规则预编译为可复用结构。 +// 非法规则会被忽略,但 PatternCount 会保留原始规则条数。 +func CompileIPRules(patterns []string) *CompiledIPRules { + compiled := &CompiledIPRules{ + CIDRs: make([]*net.IPNet, 0, len(patterns)), + IPs: make([]net.IP, 0, len(patterns)), + PatternCount: len(patterns), + } + for _, pattern := range patterns { + normalized := strings.TrimSpace(pattern) + if normalized == "" { + continue + } + if strings.Contains(normalized, "/") { + _, cidr, err := net.ParseCIDR(normalized) + if err != nil || cidr == nil { + continue + } + compiled.CIDRs = append(compiled.CIDRs, cidr) + continue + } + parsedIP := net.ParseIP(normalized) + if parsedIP == nil { + continue + } + compiled.IPs = append(compiled.IPs, parsedIP) + } + return compiled +} + +func 
matchesCompiledRules(parsedIP net.IP, rules *CompiledIPRules) bool { + if parsedIP == nil || rules == nil { + return false + } + for _, cidr := range rules.CIDRs { + if cidr.Contains(parsedIP) { + return true + } + } + for _, ruleIP := range rules.IPs { + if parsedIP.Equal(ruleIP) { + return true + } + } + return false +} + // isPrivateIP 检查 IP 是否为私有地址。 func isPrivateIP(ipStr string) bool { ip := net.ParseIP(ipStr) @@ -142,19 +197,32 @@ func MatchesAnyPattern(clientIP string, patterns []string) bool { // 2. 如果白名单不为空,IP 必须在白名单中 // 3. 如果白名单为空,允许访问(除非被黑名单拒绝) func CheckIPRestriction(clientIP string, whitelist, blacklist []string) (bool, string) { + return CheckIPRestrictionWithCompiledRules( + clientIP, + CompileIPRules(whitelist), + CompileIPRules(blacklist), + ) +} + +// CheckIPRestrictionWithCompiledRules 使用预编译规则检查 IP 是否允许访问。 +func CheckIPRestrictionWithCompiledRules(clientIP string, whitelist, blacklist *CompiledIPRules) (bool, string) { // 规范化 IP clientIP = normalizeIP(clientIP) if clientIP == "" { return false, "access denied" } + parsedIP := net.ParseIP(clientIP) + if parsedIP == nil { + return false, "access denied" + } // 1. 检查黑名单 - if len(blacklist) > 0 && MatchesAnyPattern(clientIP, blacklist) { + if blacklist != nil && blacklist.PatternCount > 0 && matchesCompiledRules(parsedIP, blacklist) { return false, "access denied" } // 2. 
检查白名单(如果设置了白名单,IP 必须在其中) - if len(whitelist) > 0 && !MatchesAnyPattern(clientIP, whitelist) { + if whitelist != nil && whitelist.PatternCount > 0 && !matchesCompiledRules(parsedIP, whitelist) { return false, "access denied" } diff --git a/backend/internal/pkg/ip/ip_test.go b/backend/internal/pkg/ip/ip_test.go index 3839403c6..403b2d59e 100644 --- a/backend/internal/pkg/ip/ip_test.go +++ b/backend/internal/pkg/ip/ip_test.go @@ -73,3 +73,24 @@ func TestGetTrustedClientIPUsesGinClientIP(t *testing.T) { require.Equal(t, 200, w.Code) require.Equal(t, "9.9.9.9", w.Body.String()) } + +func TestCheckIPRestrictionWithCompiledRules(t *testing.T) { + whitelist := CompileIPRules([]string{"10.0.0.0/8", "192.168.1.2"}) + blacklist := CompileIPRules([]string{"10.1.1.1"}) + + allowed, reason := CheckIPRestrictionWithCompiledRules("10.2.3.4", whitelist, blacklist) + require.True(t, allowed) + require.Equal(t, "", reason) + + allowed, reason = CheckIPRestrictionWithCompiledRules("10.1.1.1", whitelist, blacklist) + require.False(t, allowed) + require.Equal(t, "access denied", reason) +} + +func TestCheckIPRestrictionWithCompiledRules_InvalidWhitelistStillDenies(t *testing.T) { + // 与旧实现保持一致:白名单有配置但全无效时,最终应拒绝访问。 + invalidWhitelist := CompileIPRules([]string{"not-a-valid-pattern"}) + allowed, reason := CheckIPRestrictionWithCompiledRules("8.8.8.8", invalidWhitelist, nil) + require.False(t, allowed) + require.Equal(t, "access denied", reason) +} diff --git a/backend/internal/pkg/logger/logger.go b/backend/internal/pkg/logger/logger.go index 80d925179..3fca706ec 100644 --- a/backend/internal/pkg/logger/logger.go +++ b/backend/internal/pkg/logger/logger.go @@ -10,6 +10,7 @@ import ( "path/filepath" "strings" "sync" + "sync/atomic" "time" "go.uber.org/zap" @@ -42,15 +43,19 @@ type LogEvent struct { var ( mu sync.RWMutex - global *zap.Logger - sugar *zap.SugaredLogger + global atomic.Pointer[zap.Logger] + sugar atomic.Pointer[zap.SugaredLogger] atomicLevel zap.AtomicLevel initOptions 
InitOptions - currentSink Sink + currentSink atomic.Value // sinkState stdLogUndo func() bootstrapOnce sync.Once ) +type sinkState struct { + sink Sink +} + func InitBootstrap() { bootstrapOnce.Do(func() { if err := Init(bootstrapOptions()); err != nil { @@ -72,9 +77,9 @@ func initLocked(options InitOptions) error { return err } - prev := global - global = zl - sugar = zl.Sugar() + prev := global.Load() + global.Store(zl) + sugar.Store(zl.Sugar()) atomicLevel = al initOptions = normalized @@ -115,24 +120,32 @@ func SetLevel(level string) error { func CurrentLevel() string { mu.RLock() defer mu.RUnlock() - if global == nil { + if global.Load() == nil { return "info" } return atomicLevel.Level().String() } func SetSink(sink Sink) { - mu.Lock() - defer mu.Unlock() - currentSink = sink + currentSink.Store(sinkState{sink: sink}) +} + +func loadSink() Sink { + v := currentSink.Load() + if v == nil { + return nil + } + state, ok := v.(sinkState) + if !ok { + return nil + } + return state.sink } // WriteSinkEvent 直接写入日志 sink,不经过全局日志级别门控。 // 用于需要“可观测性入库”与“业务输出级别”解耦的场景(例如 ops 系统日志索引)。 func WriteSinkEvent(level, component, message string, fields map[string]any) { - mu.RLock() - sink := currentSink - mu.RUnlock() + sink := loadSink() if sink == nil { return } @@ -168,19 +181,15 @@ func WriteSinkEvent(level, component, message string, fields map[string]any) { } func L() *zap.Logger { - mu.RLock() - defer mu.RUnlock() - if global != nil { - return global + if l := global.Load(); l != nil { + return l } return zap.NewNop() } func S() *zap.SugaredLogger { - mu.RLock() - defer mu.RUnlock() - if sugar != nil { - return sugar + if s := sugar.Load(); s != nil { + return s } return zap.NewNop().Sugar() } @@ -190,9 +199,7 @@ func With(fields ...zap.Field) *zap.Logger { } func Sync() { - mu.RLock() - l := global - mu.RUnlock() + l := global.Load() if l != nil { _ = l.Sync() } @@ -210,7 +217,11 @@ func bridgeStdLogLocked() { log.SetFlags(0) log.SetPrefix("") - 
log.SetOutput(newStdLogBridge(global.Named("stdlog"))) + base := global.Load() + if base == nil { + base = zap.NewNop() + } + log.SetOutput(newStdLogBridge(base.Named("stdlog"))) stdLogUndo = func() { log.SetOutput(prevWriter) @@ -220,7 +231,11 @@ func bridgeStdLogLocked() { } func bridgeSlogLocked() { - slog.SetDefault(slog.New(newSlogZapHandler(global.Named("slog")))) + base := global.Load() + if base == nil { + base = zap.NewNop() + } + slog.SetDefault(slog.New(newSlogZapHandler(base.Named("slog")))) } func buildLogger(options InitOptions) (*zap.Logger, zap.AtomicLevel, error) { @@ -363,9 +378,7 @@ func (s *sinkCore) Check(entry zapcore.Entry, ce *zapcore.CheckedEntry) *zapcore func (s *sinkCore) Write(entry zapcore.Entry, fields []zapcore.Field) error { // Only handle sink forwarding — the inner cores write via their own // Write methods (added to CheckedEntry by s.core.Check above). - mu.RLock() - sink := currentSink - mu.RUnlock() + sink := loadSink() if sink == nil { return nil } @@ -454,7 +467,7 @@ func inferStdLogLevel(msg string) Level { if strings.Contains(lower, " failed") || strings.Contains(lower, "error") || strings.Contains(lower, "panic") || strings.Contains(lower, "fatal") { return LevelError } - if strings.Contains(lower, "warning") || strings.Contains(lower, "warn") || strings.Contains(lower, " retry") || strings.Contains(lower, " queue full") || strings.Contains(lower, "fallback") { + if strings.Contains(lower, "warning") || strings.Contains(lower, "warn") || strings.Contains(lower, " queue full") || strings.Contains(lower, "fallback") { return LevelWarn } return LevelInfo @@ -467,9 +480,7 @@ func LegacyPrintf(component, format string, args ...any) { return } - mu.RLock() - initialized := global != nil - mu.RUnlock() + initialized := global.Load() != nil if !initialized { // 在日志系统未初始化前,回退到标准库 log,避免测试/工具链丢日志。 log.Print(msg) diff --git a/backend/internal/pkg/logger/slog_handler.go b/backend/internal/pkg/logger/slog_handler.go index 
562b8341d..602ca1e05 100644 --- a/backend/internal/pkg/logger/slog_handler.go +++ b/backend/internal/pkg/logger/slog_handler.go @@ -48,16 +48,15 @@ func (h *slogZapHandler) Handle(_ context.Context, record slog.Record) error { return true }) - entry := h.logger.With(fields...) switch { case record.Level >= slog.LevelError: - entry.Error(record.Message) + h.logger.Error(record.Message, fields...) case record.Level >= slog.LevelWarn: - entry.Warn(record.Message) + h.logger.Warn(record.Message, fields...) case record.Level <= slog.LevelDebug: - entry.Debug(record.Message) + h.logger.Debug(record.Message, fields...) default: - entry.Info(record.Message) + h.logger.Info(record.Message, fields...) } return nil } diff --git a/backend/internal/pkg/logger/stdlog_bridge_test.go b/backend/internal/pkg/logger/stdlog_bridge_test.go index a3f76fd70..4482a2ecd 100644 --- a/backend/internal/pkg/logger/stdlog_bridge_test.go +++ b/backend/internal/pkg/logger/stdlog_bridge_test.go @@ -16,6 +16,7 @@ func TestInferStdLogLevel(t *testing.T) { {msg: "Warning: queue full", want: LevelWarn}, {msg: "Forward request failed: timeout", want: LevelError}, {msg: "[ERROR] upstream unavailable", want: LevelError}, + {msg: "[OpenAI WS Mode] reconnect_retry account_id=22 retry=1 max_retries=5", want: LevelInfo}, {msg: "service started", want: LevelInfo}, {msg: "debug: cache miss", want: LevelDebug}, } diff --git a/backend/internal/pkg/tlsfingerprint/dialer.go b/backend/internal/pkg/tlsfingerprint/dialer.go index 992f8b0ab..4f25a34ab 100644 --- a/backend/internal/pkg/tlsfingerprint/dialer.go +++ b/backend/internal/pkg/tlsfingerprint/dialer.go @@ -268,8 +268,8 @@ func (d *SOCKS5ProxyDialer) DialTLSContext(ctx context.Context, network, addr st "cipher_suites", len(spec.CipherSuites), "extensions", len(spec.Extensions), "compression_methods", spec.CompressionMethods, - "tls_vers_max", fmt.Sprintf("0x%04x", spec.TLSVersMax), - "tls_vers_min", fmt.Sprintf("0x%04x", spec.TLSVersMin)) + "tls_vers_max", 
spec.TLSVersMax, + "tls_vers_min", spec.TLSVersMin) if d.profile != nil { slog.Debug("tls_fingerprint_socks5_using_profile", "name", d.profile.Name, "grease", d.profile.EnableGREASE) @@ -294,8 +294,8 @@ func (d *SOCKS5ProxyDialer) DialTLSContext(ctx context.Context, network, addr st state := tlsConn.ConnectionState() slog.Debug("tls_fingerprint_socks5_handshake_success", - "version", fmt.Sprintf("0x%04x", state.Version), - "cipher_suite", fmt.Sprintf("0x%04x", state.CipherSuite), + "version", state.Version, + "cipher_suite", state.CipherSuite, "alpn", state.NegotiatedProtocol) return tlsConn, nil @@ -404,8 +404,8 @@ func (d *HTTPProxyDialer) DialTLSContext(ctx context.Context, network, addr stri state := tlsConn.ConnectionState() slog.Debug("tls_fingerprint_http_proxy_handshake_success", - "version", fmt.Sprintf("0x%04x", state.Version), - "cipher_suite", fmt.Sprintf("0x%04x", state.CipherSuite), + "version", state.Version, + "cipher_suite", state.CipherSuite, "alpn", state.NegotiatedProtocol) return tlsConn, nil @@ -470,8 +470,8 @@ func (d *Dialer) DialTLSContext(ctx context.Context, network, addr string) (net. 
// Log successful handshake details state := tlsConn.ConnectionState() slog.Debug("tls_fingerprint_handshake_success", - "version", fmt.Sprintf("0x%04x", state.Version), - "cipher_suite", fmt.Sprintf("0x%04x", state.CipherSuite), + "version", state.Version, + "cipher_suite", state.CipherSuite, "alpn", state.NegotiatedProtocol) return tlsConn, nil diff --git a/backend/internal/repository/account_repo.go b/backend/internal/repository/account_repo.go index cf4671db2..4aa749284 100644 --- a/backend/internal/repository/account_repo.go +++ b/backend/internal/repository/account_repo.go @@ -50,11 +50,6 @@ type accountRepository struct { schedulerCache service.SchedulerCache } -type tempUnschedSnapshot struct { - until *time.Time - reason string -} - // NewAccountRepository 创建账户仓储实例。 // 这是对外暴露的构造函数,返回接口类型以便于依赖注入。 func NewAccountRepository(client *dbent.Client, sqlDB *sql.DB, schedulerCache service.SchedulerCache) service.AccountRepository { @@ -189,11 +184,6 @@ func (r *accountRepository) GetByIDs(ctx context.Context, ids []int64) ([]*servi accountIDs = append(accountIDs, acc.ID) } - tempUnschedMap, err := r.loadTempUnschedStates(ctx, accountIDs) - if err != nil { - return nil, err - } - groupsByAccount, groupIDsByAccount, accountGroupsByAccount, err := r.loadAccountGroups(ctx, accountIDs) if err != nil { return nil, err @@ -220,10 +210,6 @@ func (r *accountRepository) GetByIDs(ctx context.Context, ids []int64) ([]*servi if ags, ok := accountGroupsByAccount[entAcc.ID]; ok { out.AccountGroups = ags } - if snap, ok := tempUnschedMap[entAcc.ID]; ok { - out.TempUnschedulableUntil = snap.until - out.TempUnschedulableReason = snap.reason - } outByID[entAcc.ID] = out } @@ -1326,10 +1312,6 @@ func (r *accountRepository) accountsToService(ctx context.Context, accounts []*d if err != nil { return nil, err } - tempUnschedMap, err := r.loadTempUnschedStates(ctx, accountIDs) - if err != nil { - return nil, err - } groupsByAccount, groupIDsByAccount, accountGroupsByAccount, err := 
r.loadAccountGroups(ctx, accountIDs) if err != nil { return nil, err @@ -1355,10 +1337,6 @@ func (r *accountRepository) accountsToService(ctx context.Context, accounts []*d if ags, ok := accountGroupsByAccount[acc.ID]; ok { out.AccountGroups = ags } - if snap, ok := tempUnschedMap[acc.ID]; ok { - out.TempUnschedulableUntil = snap.until - out.TempUnschedulableReason = snap.reason - } outAccounts = append(outAccounts, *out) } @@ -1383,48 +1361,6 @@ func notExpiredPredicate(now time.Time) dbpredicate.Account { ) } -func (r *accountRepository) loadTempUnschedStates(ctx context.Context, accountIDs []int64) (map[int64]tempUnschedSnapshot, error) { - out := make(map[int64]tempUnschedSnapshot) - if len(accountIDs) == 0 { - return out, nil - } - - rows, err := r.sql.QueryContext(ctx, ` - SELECT id, temp_unschedulable_until, temp_unschedulable_reason - FROM accounts - WHERE id = ANY($1) - `, pq.Array(accountIDs)) - if err != nil { - return nil, err - } - defer func() { _ = rows.Close() }() - - for rows.Next() { - var id int64 - var until sql.NullTime - var reason sql.NullString - if err := rows.Scan(&id, &until, &reason); err != nil { - return nil, err - } - var untilPtr *time.Time - if until.Valid { - tmp := until.Time - untilPtr = &tmp - } - if reason.Valid { - out[id] = tempUnschedSnapshot{until: untilPtr, reason: reason.String} - } else { - out[id] = tempUnschedSnapshot{until: untilPtr, reason: ""} - } - } - - if err := rows.Err(); err != nil { - return nil, err - } - - return out, nil -} - func (r *accountRepository) loadProxies(ctx context.Context, proxyIDs []int64) (map[int64]*service.Proxy, error) { proxyMap := make(map[int64]*service.Proxy) if len(proxyIDs) == 0 { @@ -1535,31 +1471,33 @@ func accountEntityToService(m *dbent.Account) *service.Account { rateMultiplier := m.RateMultiplier return &service.Account{ - ID: m.ID, - Name: m.Name, - Notes: m.Notes, - Platform: m.Platform, - Type: m.Type, - Credentials: copyJSONMap(m.Credentials), - Extra: 
copyJSONMap(m.Extra), - ProxyID: m.ProxyID, - Concurrency: m.Concurrency, - Priority: m.Priority, - RateMultiplier: &rateMultiplier, - Status: m.Status, - ErrorMessage: derefString(m.ErrorMessage), - LastUsedAt: m.LastUsedAt, - ExpiresAt: m.ExpiresAt, - AutoPauseOnExpired: m.AutoPauseOnExpired, - CreatedAt: m.CreatedAt, - UpdatedAt: m.UpdatedAt, - Schedulable: m.Schedulable, - RateLimitedAt: m.RateLimitedAt, - RateLimitResetAt: m.RateLimitResetAt, - OverloadUntil: m.OverloadUntil, - SessionWindowStart: m.SessionWindowStart, - SessionWindowEnd: m.SessionWindowEnd, - SessionWindowStatus: derefString(m.SessionWindowStatus), + ID: m.ID, + Name: m.Name, + Notes: m.Notes, + Platform: m.Platform, + Type: m.Type, + Credentials: copyJSONMap(m.Credentials), + Extra: copyJSONMap(m.Extra), + ProxyID: m.ProxyID, + Concurrency: m.Concurrency, + Priority: m.Priority, + RateMultiplier: &rateMultiplier, + Status: m.Status, + ErrorMessage: derefString(m.ErrorMessage), + LastUsedAt: m.LastUsedAt, + ExpiresAt: m.ExpiresAt, + AutoPauseOnExpired: m.AutoPauseOnExpired, + CreatedAt: m.CreatedAt, + UpdatedAt: m.UpdatedAt, + Schedulable: m.Schedulable, + RateLimitedAt: m.RateLimitedAt, + RateLimitResetAt: m.RateLimitResetAt, + OverloadUntil: m.OverloadUntil, + TempUnschedulableUntil: m.TempUnschedulableUntil, + TempUnschedulableReason: derefString(m.TempUnschedulableReason), + SessionWindowStart: m.SessionWindowStart, + SessionWindowEnd: m.SessionWindowEnd, + SessionWindowStatus: derefString(m.SessionWindowStatus), } } diff --git a/backend/internal/repository/account_repo_integration_test.go b/backend/internal/repository/account_repo_integration_test.go index 4f9d0152c..fd48a5d45 100644 --- a/backend/internal/repository/account_repo_integration_test.go +++ b/backend/internal/repository/account_repo_integration_test.go @@ -500,6 +500,38 @@ func (s *AccountRepoSuite) TestClearRateLimit() { s.Require().Nil(got.OverloadUntil) } +func (s *AccountRepoSuite) 
TestTempUnschedulableFieldsLoadedByGetByIDAndGetByIDs() { + acc1 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-temp-1"}) + acc2 := mustCreateAccount(s.T(), s.client, &service.Account{Name: "acc-temp-2"}) + + until := time.Now().Add(15 * time.Minute).UTC().Truncate(time.Second) + reason := `{"rule":"429","matched_keyword":"too many requests"}` + s.Require().NoError(s.repo.SetTempUnschedulable(s.ctx, acc1.ID, until, reason)) + + gotByID, err := s.repo.GetByID(s.ctx, acc1.ID) + s.Require().NoError(err) + s.Require().NotNil(gotByID.TempUnschedulableUntil) + s.Require().WithinDuration(until, *gotByID.TempUnschedulableUntil, time.Second) + s.Require().Equal(reason, gotByID.TempUnschedulableReason) + + gotByIDs, err := s.repo.GetByIDs(s.ctx, []int64{acc2.ID, acc1.ID}) + s.Require().NoError(err) + s.Require().Len(gotByIDs, 2) + s.Require().Equal(acc2.ID, gotByIDs[0].ID) + s.Require().Nil(gotByIDs[0].TempUnschedulableUntil) + s.Require().Equal("", gotByIDs[0].TempUnschedulableReason) + s.Require().Equal(acc1.ID, gotByIDs[1].ID) + s.Require().NotNil(gotByIDs[1].TempUnschedulableUntil) + s.Require().WithinDuration(until, *gotByIDs[1].TempUnschedulableUntil, time.Second) + s.Require().Equal(reason, gotByIDs[1].TempUnschedulableReason) + + s.Require().NoError(s.repo.ClearTempUnschedulable(s.ctx, acc1.ID)) + cleared, err := s.repo.GetByID(s.ctx, acc1.ID) + s.Require().NoError(err) + s.Require().Nil(cleared.TempUnschedulableUntil) + s.Require().Equal("", cleared.TempUnschedulableReason) +} + // --- UpdateLastUsed --- func (s *AccountRepoSuite) TestUpdateLastUsed() { diff --git a/backend/internal/repository/concurrency_cache.go b/backend/internal/repository/concurrency_cache.go index e047bff08..a2552715c 100644 --- a/backend/internal/repository/concurrency_cache.go +++ b/backend/internal/repository/concurrency_cache.go @@ -227,6 +227,43 @@ func (c *concurrencyCache) GetAccountConcurrency(ctx context.Context, accountID return result, nil } +func (c 
*concurrencyCache) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) { + if len(accountIDs) == 0 { + return map[int64]int{}, nil + } + + now, err := c.rdb.Time(ctx).Result() + if err != nil { + return nil, fmt.Errorf("redis TIME: %w", err) + } + cutoffTime := now.Unix() - int64(c.slotTTLSeconds) + + pipe := c.rdb.Pipeline() + type accountCmd struct { + accountID int64 + zcardCmd *redis.IntCmd + } + cmds := make([]accountCmd, 0, len(accountIDs)) + for _, accountID := range accountIDs { + slotKey := accountSlotKeyPrefix + strconv.FormatInt(accountID, 10) + pipe.ZRemRangeByScore(ctx, slotKey, "-inf", strconv.FormatInt(cutoffTime, 10)) + cmds = append(cmds, accountCmd{ + accountID: accountID, + zcardCmd: pipe.ZCard(ctx, slotKey), + }) + } + + if _, err := pipe.Exec(ctx); err != nil && !errors.Is(err, redis.Nil) { + return nil, fmt.Errorf("pipeline exec: %w", err) + } + + result := make(map[int64]int, len(accountIDs)) + for _, cmd := range cmds { + result[cmd.accountID] = int(cmd.zcardCmd.Val()) + } + return result, nil +} + // User slot operations func (c *concurrencyCache) AcquireUserSlot(ctx context.Context, userID int64, maxConcurrency int, requestID string) (bool, error) { diff --git a/backend/internal/repository/group_repo.go b/backend/internal/repository/group_repo.go index 1aba210fe..77f1c494b 100644 --- a/backend/internal/repository/group_repo.go +++ b/backend/internal/repository/group_repo.go @@ -4,6 +4,8 @@ import ( "context" "database/sql" "errors" + "fmt" + "strings" dbent "github.com/Wei-Shaw/sub2api/ent" "github.com/Wei-Shaw/sub2api/ent/apikey" @@ -560,22 +562,72 @@ func (r *groupRepository) UpdateSortOrders(ctx context.Context, updates []servic return nil } - // 使用事务批量更新 - tx, err := r.client.Tx(ctx) - if err != nil { + // 去重后保留最后一次排序值,避免重复 ID 造成 CASE 分支冲突。 + sortOrderByID := make(map[int64]int, len(updates)) + groupIDs := make([]int64, 0, len(updates)) + for _, u := range updates { + if u.ID <= 0 { + continue + } 
+ if _, exists := sortOrderByID[u.ID]; !exists { + groupIDs = append(groupIDs, u.ID) + } + sortOrderByID[u.ID] = u.SortOrder + } + if len(groupIDs) == 0 { + return nil + } + + // 与旧实现保持一致:任何不存在/已删除的分组都返回 not found,且不执行更新。 + var existingCount int + if err := scanSingleRow( + ctx, + r.sql, + `SELECT COUNT(*) FROM groups WHERE deleted_at IS NULL AND id = ANY($1)`, + []any{pq.Array(groupIDs)}, + &existingCount, + ); err != nil { return err } - defer func() { _ = tx.Rollback() }() + if existingCount != len(groupIDs) { + return service.ErrGroupNotFound + } - for _, u := range updates { - if _, err := tx.Group.UpdateOneID(u.ID).SetSortOrder(u.SortOrder).Save(ctx); err != nil { - return translatePersistenceError(err, service.ErrGroupNotFound, nil) - } + args := make([]any, 0, len(groupIDs)*2+1) + caseClauses := make([]string, 0, len(groupIDs)) + placeholder := 1 + for _, id := range groupIDs { + caseClauses = append(caseClauses, fmt.Sprintf("WHEN $%d THEN $%d", placeholder, placeholder+1)) + args = append(args, id, sortOrderByID[id]) + placeholder += 2 } + args = append(args, pq.Array(groupIDs)) - if err := tx.Commit(); err != nil { + query := fmt.Sprintf(` + UPDATE groups + SET sort_order = CASE id + %s + ELSE sort_order + END + WHERE deleted_at IS NULL AND id = ANY($%d) + `, strings.Join(caseClauses, "\n\t\t\t"), placeholder) + + result, err := r.sql.ExecContext(ctx, query, args...) 
+ if err != nil { return err } + affected, err := result.RowsAffected() + if err != nil { + return err + } + if affected != int64(len(groupIDs)) { + return service.ErrGroupNotFound + } + for _, id := range groupIDs { + if err := enqueueSchedulerOutbox(ctx, r.sql, service.SchedulerOutboxEventGroupChanged, nil, &id, nil); err != nil { + logger.LegacyPrintf("repository.group", "[SchedulerOutbox] enqueue group sort update failed: group=%d err=%v", id, err) + } + } return nil } diff --git a/backend/internal/repository/group_repo_integration_test.go b/backend/internal/repository/group_repo_integration_test.go index c31a9ec4e..4a849a460 100644 --- a/backend/internal/repository/group_repo_integration_test.go +++ b/backend/internal/repository/group_repo_integration_test.go @@ -352,6 +352,81 @@ func (s *GroupRepoSuite) TestListWithFilters_Search() { }) } +func (s *GroupRepoSuite) TestUpdateSortOrders_BatchCaseWhen() { + g1 := &service.Group{ + Name: "sort-g1", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + g2 := &service.Group{ + Name: "sort-g2", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + g3 := &service.Group{ + Name: "sort-g3", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, g1)) + s.Require().NoError(s.repo.Create(s.ctx, g2)) + s.Require().NoError(s.repo.Create(s.ctx, g3)) + + err := s.repo.UpdateSortOrders(s.ctx, []service.GroupSortOrderUpdate{ + {ID: g1.ID, SortOrder: 30}, + {ID: g2.ID, SortOrder: 10}, + {ID: g3.ID, SortOrder: 20}, + {ID: g2.ID, SortOrder: 15}, // 重复 ID 应以最后一次为准 + }) + s.Require().NoError(err) + + got1, err := s.repo.GetByID(s.ctx, 
g1.ID) + s.Require().NoError(err) + got2, err := s.repo.GetByID(s.ctx, g2.ID) + s.Require().NoError(err) + got3, err := s.repo.GetByID(s.ctx, g3.ID) + s.Require().NoError(err) + s.Require().Equal(30, got1.SortOrder) + s.Require().Equal(15, got2.SortOrder) + s.Require().Equal(20, got3.SortOrder) +} + +func (s *GroupRepoSuite) TestUpdateSortOrders_MissingGroupNoPartialUpdate() { + g1 := &service.Group{ + Name: "sort-no-partial", + Platform: service.PlatformAnthropic, + RateMultiplier: 1.0, + IsExclusive: false, + Status: service.StatusActive, + SubscriptionType: service.SubscriptionTypeStandard, + } + s.Require().NoError(s.repo.Create(s.ctx, g1)) + + before, err := s.repo.GetByID(s.ctx, g1.ID) + s.Require().NoError(err) + beforeSort := before.SortOrder + + err = s.repo.UpdateSortOrders(s.ctx, []service.GroupSortOrderUpdate{ + {ID: g1.ID, SortOrder: 99}, + {ID: 99999999, SortOrder: 1}, + }) + s.Require().Error(err) + s.Require().ErrorIs(err, service.ErrGroupNotFound) + + after, err := s.repo.GetByID(s.ctx, g1.ID) + s.Require().NoError(err) + s.Require().Equal(beforeSort, after.SortOrder) +} + func (s *GroupRepoSuite) TestListWithFilters_AccountCount() { g1 := &service.Group{ Name: "g1", diff --git a/backend/internal/repository/migrations_runner.go b/backend/internal/repository/migrations_runner.go index 8cfaf4e6e..a60ba2946 100644 --- a/backend/internal/repository/migrations_runner.go +++ b/backend/internal/repository/migrations_runner.go @@ -50,6 +50,7 @@ CREATE TABLE IF NOT EXISTS atlas_schema_revisions ( // 任何稳定的 int64 值都可以,只要不与同一数据库中的其他锁冲突即可。 const migrationsAdvisoryLockID int64 = 694208311321144027 const migrationsLockRetryInterval = 500 * time.Millisecond +const nonTransactionalMigrationSuffix = "_notx.sql" type migrationChecksumCompatibilityRule struct { fileChecksum string @@ -185,8 +186,34 @@ func applyMigrationsFS(ctx context.Context, db *sql.DB, fsys fs.FS) error { return fmt.Errorf("check migration %s: %w", name, rowErr) } - // 迁移未应用,在事务中执行。 - // 
使用事务确保迁移的原子性:要么完全成功,要么完全回滚。 + nonTx, err := validateMigrationExecutionMode(name, content) + if err != nil { + return fmt.Errorf("validate migration %s: %w", name, err) + } + + if nonTx { + // *_notx.sql:用于 CREATE/DROP INDEX CONCURRENTLY 场景,必须非事务执行。 + // 逐条语句执行,避免将多条 CONCURRENTLY 语句放入同一个隐式事务块。 + statements := splitSQLStatements(content) + for i, stmt := range statements { + trimmed := strings.TrimSpace(stmt) + if trimmed == "" { + continue + } + if stripSQLLineComment(trimmed) == "" { + continue + } + if _, err := db.ExecContext(ctx, trimmed); err != nil { + return fmt.Errorf("apply migration %s (non-tx statement %d): %w", name, i+1, err) + } + } + if _, err := db.ExecContext(ctx, "INSERT INTO schema_migrations (filename, checksum) VALUES ($1, $2)", name, checksum); err != nil { + return fmt.Errorf("record migration %s (non-tx): %w", name, err) + } + continue + } + + // 默认迁移在事务中执行,确保原子性:要么完全成功,要么完全回滚。 tx, err := db.BeginTx(ctx, nil) if err != nil { return fmt.Errorf("begin migration %s: %w", name, err) @@ -300,6 +327,72 @@ func isMigrationChecksumCompatible(name, dbChecksum, fileChecksum string) bool { return ok } +func validateMigrationExecutionMode(name, content string) (bool, error) { + normalizedName := strings.ToLower(strings.TrimSpace(name)) + upperContent := strings.ToUpper(content) + nonTx := strings.HasSuffix(normalizedName, nonTransactionalMigrationSuffix) + + if !nonTx { + if strings.Contains(upperContent, "CONCURRENTLY") { + return false, errors.New("CONCURRENTLY statements must be placed in *_notx.sql migrations") + } + return false, nil + } + + if strings.Contains(upperContent, "BEGIN") || strings.Contains(upperContent, "COMMIT") || strings.Contains(upperContent, "ROLLBACK") { + return false, errors.New("*_notx.sql must not contain transaction control statements (BEGIN/COMMIT/ROLLBACK)") + } + + statements := splitSQLStatements(content) + for _, stmt := range statements { + normalizedStmt := strings.ToUpper(stripSQLLineComment(strings.TrimSpace(stmt))) 
+ if normalizedStmt == "" { + continue + } + + if strings.Contains(normalizedStmt, "CONCURRENTLY") { + isCreateIndex := strings.Contains(normalizedStmt, "CREATE") && strings.Contains(normalizedStmt, "INDEX") + isDropIndex := strings.Contains(normalizedStmt, "DROP") && strings.Contains(normalizedStmt, "INDEX") + if !isCreateIndex && !isDropIndex { + return false, errors.New("*_notx.sql currently only supports CREATE/DROP INDEX CONCURRENTLY statements") + } + if isCreateIndex && !strings.Contains(normalizedStmt, "IF NOT EXISTS") { + return false, errors.New("CREATE INDEX CONCURRENTLY in *_notx.sql must include IF NOT EXISTS for idempotency") + } + if isDropIndex && !strings.Contains(normalizedStmt, "IF EXISTS") { + return false, errors.New("DROP INDEX CONCURRENTLY in *_notx.sql must include IF EXISTS for idempotency") + } + continue + } + + return false, errors.New("*_notx.sql must not mix non-CONCURRENTLY SQL statements") + } + + return true, nil +} + +func splitSQLStatements(content string) []string { + parts := strings.Split(content, ";") + out := make([]string, 0, len(parts)) + for _, part := range parts { + if strings.TrimSpace(part) == "" { + continue + } + out = append(out, part) + } + return out +} + +func stripSQLLineComment(s string) string { + lines := strings.Split(s, "\n") + for i, line := range lines { + if idx := strings.Index(line, "--"); idx >= 0 { + lines[i] = line[:idx] + } + } + return strings.TrimSpace(strings.Join(lines, "\n")) +} + // pgAdvisoryLock 获取 PostgreSQL Advisory Lock。 // Advisory Lock 是一种轻量级的锁机制,不与任何特定的数据库对象关联。 // 它非常适合用于应用层面的分布式锁场景,如迁移序列化。 diff --git a/backend/internal/repository/migrations_runner_notx_test.go b/backend/internal/repository/migrations_runner_notx_test.go new file mode 100644 index 000000000..cdc880283 --- /dev/null +++ b/backend/internal/repository/migrations_runner_notx_test.go @@ -0,0 +1,164 @@ +package repository + +import ( + "context" + "database/sql" + "testing" + "testing/fstest" + + sqlmock 
"github.com/DATA-DOG/go-sqlmock" + "github.com/stretchr/testify/require" +) + +func TestValidateMigrationExecutionMode(t *testing.T) { + t.Run("事务迁移包含CONCURRENTLY会被拒绝", func(t *testing.T) { + nonTx, err := validateMigrationExecutionMode("001_add_idx.sql", "CREATE INDEX CONCURRENTLY idx_a ON t(a);") + require.False(t, nonTx) + require.Error(t, err) + }) + + t.Run("notx迁移要求CREATE使用IF NOT EXISTS", func(t *testing.T) { + nonTx, err := validateMigrationExecutionMode("001_add_idx_notx.sql", "CREATE INDEX CONCURRENTLY idx_a ON t(a);") + require.False(t, nonTx) + require.Error(t, err) + }) + + t.Run("notx迁移要求DROP使用IF EXISTS", func(t *testing.T) { + nonTx, err := validateMigrationExecutionMode("001_drop_idx_notx.sql", "DROP INDEX CONCURRENTLY idx_a;") + require.False(t, nonTx) + require.Error(t, err) + }) + + t.Run("notx迁移禁止事务控制语句", func(t *testing.T) { + nonTx, err := validateMigrationExecutionMode("001_add_idx_notx.sql", "BEGIN; CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_a ON t(a); COMMIT;") + require.False(t, nonTx) + require.Error(t, err) + }) + + t.Run("notx迁移禁止混用非CONCURRENTLY语句", func(t *testing.T) { + nonTx, err := validateMigrationExecutionMode("001_add_idx_notx.sql", "CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_a ON t(a); UPDATE t SET a = 1;") + require.False(t, nonTx) + require.Error(t, err) + }) + + t.Run("notx迁移允许幂等并发索引语句", func(t *testing.T) { + nonTx, err := validateMigrationExecutionMode("001_add_idx_notx.sql", ` +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_a ON t(a); +DROP INDEX CONCURRENTLY IF EXISTS idx_b; +`) + require.True(t, nonTx) + require.NoError(t, err) + }) +} + +func TestApplyMigrationsFS_NonTransactionalMigration(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + prepareMigrationsBootstrapExpectations(mock) + mock.ExpectQuery("SELECT checksum FROM schema_migrations WHERE filename = \\$1"). + WithArgs("001_add_idx_notx.sql"). 
+ WillReturnError(sql.ErrNoRows) + mock.ExpectExec("CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_t_a ON t\\(a\\)"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectExec("INSERT INTO schema_migrations \\(filename, checksum\\) VALUES \\(\\$1, \\$2\\)"). + WithArgs("001_add_idx_notx.sql", sqlmock.AnyArg()). + WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectExec("SELECT pg_advisory_unlock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). + WillReturnResult(sqlmock.NewResult(0, 1)) + + fsys := fstest.MapFS{ + "001_add_idx_notx.sql": &fstest.MapFile{ + Data: []byte("CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_t_a ON t(a);"), + }, + } + + err = applyMigrationsFS(context.Background(), db, fsys) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestApplyMigrationsFS_NonTransactionalMigration_MultiStatements(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + prepareMigrationsBootstrapExpectations(mock) + mock.ExpectQuery("SELECT checksum FROM schema_migrations WHERE filename = \\$1"). + WithArgs("001_add_multi_idx_notx.sql"). + WillReturnError(sql.ErrNoRows) + mock.ExpectExec("CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_t_a ON t\\(a\\)"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectExec("CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_t_b ON t\\(b\\)"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectExec("INSERT INTO schema_migrations \\(filename, checksum\\) VALUES \\(\\$1, \\$2\\)"). + WithArgs("001_add_multi_idx_notx.sql", sqlmock.AnyArg()). + WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectExec("SELECT pg_advisory_unlock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). 
+ WillReturnResult(sqlmock.NewResult(0, 1)) + + fsys := fstest.MapFS{ + "001_add_multi_idx_notx.sql": &fstest.MapFile{ + Data: []byte(` +-- first +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_t_a ON t(a); +-- second +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_t_b ON t(b); +`), + }, + } + + err = applyMigrationsFS(context.Background(), db, fsys) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func TestApplyMigrationsFS_TransactionalMigration(t *testing.T) { + db, mock, err := sqlmock.New() + require.NoError(t, err) + defer db.Close() + + prepareMigrationsBootstrapExpectations(mock) + mock.ExpectQuery("SELECT checksum FROM schema_migrations WHERE filename = \\$1"). + WithArgs("001_add_col.sql"). + WillReturnError(sql.ErrNoRows) + mock.ExpectBegin() + mock.ExpectExec("ALTER TABLE t ADD COLUMN name TEXT"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectExec("INSERT INTO schema_migrations \\(filename, checksum\\) VALUES \\(\\$1, \\$2\\)"). + WithArgs("001_add_col.sql", sqlmock.AnyArg()). + WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() + mock.ExpectExec("SELECT pg_advisory_unlock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). + WillReturnResult(sqlmock.NewResult(0, 1)) + + fsys := fstest.MapFS{ + "001_add_col.sql": &fstest.MapFile{ + Data: []byte("ALTER TABLE t ADD COLUMN name TEXT;"), + }, + } + + err = applyMigrationsFS(context.Background(), db, fsys) + require.NoError(t, err) + require.NoError(t, mock.ExpectationsWereMet()) +} + +func prepareMigrationsBootstrapExpectations(mock sqlmock.Sqlmock) { + mock.ExpectQuery("SELECT pg_try_advisory_lock\\(\\$1\\)"). + WithArgs(migrationsAdvisoryLockID). + WillReturnRows(sqlmock.NewRows([]string{"pg_try_advisory_lock"}).AddRow(true)) + mock.ExpectExec("CREATE TABLE IF NOT EXISTS schema_migrations"). + WillReturnResult(sqlmock.NewResult(0, 0)) + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("schema_migrations"). 
+ WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(true)) + mock.ExpectQuery("SELECT EXISTS \\("). + WithArgs("atlas_schema_revisions"). + WillReturnRows(sqlmock.NewRows([]string{"exists"}).AddRow(true)) + mock.ExpectQuery("SELECT COUNT\\(\\*\\) FROM atlas_schema_revisions"). + WillReturnRows(sqlmock.NewRows([]string{"count"}).AddRow(1)) +} diff --git a/backend/internal/repository/usage_log_repo.go b/backend/internal/repository/usage_log_repo.go index 01dc69b55..4a08904d3 100644 --- a/backend/internal/repository/usage_log_repo.go +++ b/backend/internal/repository/usage_log_repo.go @@ -498,25 +498,46 @@ func (r *usageLogRepository) fillDashboardUsageStatsAggregated(ctx context.Conte } func (r *usageLogRepository) fillDashboardUsageStatsFromUsageLogs(ctx context.Context, stats *DashboardStats, startUTC, endUTC, todayUTC, now time.Time) error { - totalStatsQuery := ` + todayEnd := todayUTC.Add(24 * time.Hour) + combinedStatsQuery := ` + WITH scoped AS ( + SELECT + created_at, + input_tokens, + output_tokens, + cache_creation_tokens, + cache_read_tokens, + total_cost, + actual_cost, + COALESCE(duration_ms, 0) AS duration_ms + FROM usage_logs + WHERE created_at >= LEAST($1::timestamptz, $3::timestamptz) + AND created_at < GREATEST($2::timestamptz, $4::timestamptz) + ) SELECT - COUNT(*) as total_requests, - COALESCE(SUM(input_tokens), 0) as total_input_tokens, - COALESCE(SUM(output_tokens), 0) as total_output_tokens, - COALESCE(SUM(cache_creation_tokens), 0) as total_cache_creation_tokens, - COALESCE(SUM(cache_read_tokens), 0) as total_cache_read_tokens, - COALESCE(SUM(total_cost), 0) as total_cost, - COALESCE(SUM(actual_cost), 0) as total_actual_cost, - COALESCE(SUM(COALESCE(duration_ms, 0)), 0) as total_duration_ms - FROM usage_logs - WHERE created_at >= $1 AND created_at < $2 + COUNT(*) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz) AS total_requests, + COALESCE(SUM(input_tokens) FILTER (WHERE created_at >= $1::timestamptz AND 
created_at < $2::timestamptz), 0) AS total_input_tokens, + COALESCE(SUM(output_tokens) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_output_tokens, + COALESCE(SUM(cache_creation_tokens) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_cache_creation_tokens, + COALESCE(SUM(cache_read_tokens) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_cache_read_tokens, + COALESCE(SUM(total_cost) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_cost, + COALESCE(SUM(actual_cost) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_actual_cost, + COALESCE(SUM(duration_ms) FILTER (WHERE created_at >= $1::timestamptz AND created_at < $2::timestamptz), 0) AS total_duration_ms, + COUNT(*) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz) AS today_requests, + COALESCE(SUM(input_tokens) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_input_tokens, + COALESCE(SUM(output_tokens) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_output_tokens, + COALESCE(SUM(cache_creation_tokens) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_cache_creation_tokens, + COALESCE(SUM(cache_read_tokens) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_cache_read_tokens, + COALESCE(SUM(total_cost) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_cost, + COALESCE(SUM(actual_cost) FILTER (WHERE created_at >= $3::timestamptz AND created_at < $4::timestamptz), 0) AS today_actual_cost + FROM scoped ` var totalDurationMs int64 if err := scanSingleRow( ctx, r.sql, - totalStatsQuery, - []any{startUTC, endUTC}, + combinedStatsQuery, + []any{startUTC, endUTC, todayUTC, todayEnd}, 
&stats.TotalRequests, &stats.TotalInputTokens, &stats.TotalOutputTokens, @@ -525,32 +546,6 @@ func (r *usageLogRepository) fillDashboardUsageStatsFromUsageLogs(ctx context.Co &stats.TotalCost, &stats.TotalActualCost, &totalDurationMs, - ); err != nil { - return err - } - stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens - if stats.TotalRequests > 0 { - stats.AverageDurationMs = float64(totalDurationMs) / float64(stats.TotalRequests) - } - - todayEnd := todayUTC.Add(24 * time.Hour) - todayStatsQuery := ` - SELECT - COUNT(*) as today_requests, - COALESCE(SUM(input_tokens), 0) as today_input_tokens, - COALESCE(SUM(output_tokens), 0) as today_output_tokens, - COALESCE(SUM(cache_creation_tokens), 0) as today_cache_creation_tokens, - COALESCE(SUM(cache_read_tokens), 0) as today_cache_read_tokens, - COALESCE(SUM(total_cost), 0) as today_cost, - COALESCE(SUM(actual_cost), 0) as today_actual_cost - FROM usage_logs - WHERE created_at >= $1 AND created_at < $2 - ` - if err := scanSingleRow( - ctx, - r.sql, - todayStatsQuery, - []any{todayUTC, todayEnd}, &stats.TodayRequests, &stats.TodayInputTokens, &stats.TodayOutputTokens, @@ -561,25 +556,28 @@ func (r *usageLogRepository) fillDashboardUsageStatsFromUsageLogs(ctx context.Co ); err != nil { return err } - stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens - - activeUsersQuery := ` - SELECT COUNT(DISTINCT user_id) as active_users - FROM usage_logs - WHERE created_at >= $1 AND created_at < $2 - ` - if err := scanSingleRow(ctx, r.sql, activeUsersQuery, []any{todayUTC, todayEnd}, &stats.ActiveUsers); err != nil { - return err + stats.TotalTokens = stats.TotalInputTokens + stats.TotalOutputTokens + stats.TotalCacheCreationTokens + stats.TotalCacheReadTokens + if stats.TotalRequests > 0 { + stats.AverageDurationMs = float64(totalDurationMs) / float64(stats.TotalRequests) } 
+ stats.TodayTokens = stats.TodayInputTokens + stats.TodayOutputTokens + stats.TodayCacheCreationTokens + stats.TodayCacheReadTokens + hourStart := now.UTC().Truncate(time.Hour) hourEnd := hourStart.Add(time.Hour) - hourlyActiveQuery := ` - SELECT COUNT(DISTINCT user_id) as active_users - FROM usage_logs - WHERE created_at >= $1 AND created_at < $2 + activeUsersQuery := ` + WITH scoped AS ( + SELECT user_id, created_at + FROM usage_logs + WHERE created_at >= LEAST($1::timestamptz, $3::timestamptz) + AND created_at < GREATEST($2::timestamptz, $4::timestamptz) + ) + SELECT + COUNT(DISTINCT CASE WHEN created_at >= $1::timestamptz AND created_at < $2::timestamptz THEN user_id END) AS active_users, + COUNT(DISTINCT CASE WHEN created_at >= $3::timestamptz AND created_at < $4::timestamptz THEN user_id END) AS hourly_active_users + FROM scoped ` - if err := scanSingleRow(ctx, r.sql, hourlyActiveQuery, []any{hourStart, hourEnd}, &stats.HourlyActiveUsers); err != nil { + if err := scanSingleRow(ctx, r.sql, activeUsersQuery, []any{todayUTC, todayEnd, hourStart, hourEnd}, &stats.ActiveUsers, &stats.HourlyActiveUsers); err != nil { return err } diff --git a/backend/internal/server/middleware/api_key_auth.go b/backend/internal/server/middleware/api_key_auth.go index 8fa3517a0..19f972396 100644 --- a/backend/internal/server/middleware/api_key_auth.go +++ b/backend/internal/server/middleware/api_key_auth.go @@ -97,7 +97,7 @@ func apiKeyAuthWithSubscription(apiKeyService *service.APIKeyService, subscripti // 注意:错误信息故意模糊,避免暴露具体的 IP 限制机制 if len(apiKey.IPWhitelist) > 0 || len(apiKey.IPBlacklist) > 0 { clientIP := ip.GetTrustedClientIP(c) - allowed, _ := ip.CheckIPRestriction(clientIP, apiKey.IPWhitelist, apiKey.IPBlacklist) + allowed, _ := ip.CheckIPRestrictionWithCompiledRules(clientIP, apiKey.CompiledIPWhitelist, apiKey.CompiledIPBlacklist) if !allowed { AbortWithError(c, 403, "ACCESS_DENIED", "Access denied") return diff --git 
a/backend/internal/server/middleware/api_key_auth_google.go b/backend/internal/server/middleware/api_key_auth_google.go index 9da1b1c61..84d93edc5 100644 --- a/backend/internal/server/middleware/api_key_auth_google.go +++ b/backend/internal/server/middleware/api_key_auth_google.go @@ -80,17 +80,25 @@ func APIKeyAuthWithSubscriptionGoogle(apiKeyService *service.APIKeyService, subs abortWithGoogleError(c, 403, "No active subscription found for this group") return } - if err := subscriptionService.ValidateSubscription(c.Request.Context(), subscription); err != nil { - abortWithGoogleError(c, 403, err.Error()) - return - } - _ = subscriptionService.CheckAndActivateWindow(c.Request.Context(), subscription) - _ = subscriptionService.CheckAndResetWindows(c.Request.Context(), subscription) - if err := subscriptionService.CheckUsageLimits(c.Request.Context(), subscription, apiKey.Group, 0); err != nil { - abortWithGoogleError(c, 429, err.Error()) + + needsMaintenance, err := subscriptionService.ValidateAndCheckLimits(subscription, apiKey.Group) + if err != nil { + status := 403 + if errors.Is(err, service.ErrDailyLimitExceeded) || + errors.Is(err, service.ErrWeeklyLimitExceeded) || + errors.Is(err, service.ErrMonthlyLimitExceeded) { + status = 429 + } + abortWithGoogleError(c, status, err.Error()) return } + c.Set(string(ContextKeySubscription), subscription) + + if needsMaintenance { + maintenanceCopy := *subscription + subscriptionService.DoWindowMaintenance(&maintenanceCopy) + } } else { if apiKey.User.Balance <= 0 { abortWithGoogleError(c, 403, "Insufficient account balance") diff --git a/backend/internal/server/middleware/api_key_auth_google_test.go b/backend/internal/server/middleware/api_key_auth_google_test.go index e4e0e253f..2124c86c9 100644 --- a/backend/internal/server/middleware/api_key_auth_google_test.go +++ b/backend/internal/server/middleware/api_key_auth_google_test.go @@ -23,6 +23,15 @@ type fakeAPIKeyRepo struct { updateLastUsed func(ctx context.Context, 
id int64, usedAt time.Time) error } +type fakeGoogleSubscriptionRepo struct { + getActive func(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) + updateStatus func(ctx context.Context, subscriptionID int64, status string) error + activateWindow func(ctx context.Context, id int64, start time.Time) error + resetDaily func(ctx context.Context, id int64, start time.Time) error + resetWeekly func(ctx context.Context, id int64, start time.Time) error + resetMonthly func(ctx context.Context, id int64, start time.Time) error +} + func (f fakeAPIKeyRepo) Create(ctx context.Context, key *service.APIKey) error { return errors.New("not implemented") } @@ -87,6 +96,85 @@ func (f fakeAPIKeyRepo) UpdateLastUsed(ctx context.Context, id int64, usedAt tim return nil } +func (f fakeGoogleSubscriptionRepo) Create(ctx context.Context, sub *service.UserSubscription) error { + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) GetByID(ctx context.Context, id int64) (*service.UserSubscription, error) { + return nil, errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) GetByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { + return nil, errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) GetActiveByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { + if f.getActive != nil { + return f.getActive(ctx, userID, groupID) + } + return nil, errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) Update(ctx context.Context, sub *service.UserSubscription) error { + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) Delete(ctx context.Context, id int64) error { + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) ListByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { + return nil, errors.New("not implemented") +} +func (f 
fakeGoogleSubscriptionRepo) ListActiveByUserID(ctx context.Context, userID int64) ([]service.UserSubscription, error) { + return nil, errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) ListByGroupID(ctx context.Context, groupID int64, params pagination.PaginationParams) ([]service.UserSubscription, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) List(ctx context.Context, params pagination.PaginationParams, userID, groupID *int64, status, sortBy, sortOrder string) ([]service.UserSubscription, *pagination.PaginationResult, error) { + return nil, nil, errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) ExistsByUserIDAndGroupID(ctx context.Context, userID, groupID int64) (bool, error) { + return false, errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) ExtendExpiry(ctx context.Context, subscriptionID int64, newExpiresAt time.Time) error { + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) UpdateStatus(ctx context.Context, subscriptionID int64, status string) error { + if f.updateStatus != nil { + return f.updateStatus(ctx, subscriptionID, status) + } + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) UpdateNotes(ctx context.Context, subscriptionID int64, notes string) error { + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) ActivateWindows(ctx context.Context, id int64, start time.Time) error { + if f.activateWindow != nil { + return f.activateWindow(ctx, id, start) + } + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) ResetDailyUsage(ctx context.Context, id int64, start time.Time) error { + if f.resetDaily != nil { + return f.resetDaily(ctx, id, start) + } + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) ResetWeeklyUsage(ctx context.Context, id int64, start time.Time) error { + if 
f.resetWeekly != nil { + return f.resetWeekly(ctx, id, start) + } + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) ResetMonthlyUsage(ctx context.Context, id int64, start time.Time) error { + if f.resetMonthly != nil { + return f.resetMonthly(ctx, id, start) + } + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) IncrementUsage(ctx context.Context, id int64, costUSD float64) error { + return errors.New("not implemented") +} +func (f fakeGoogleSubscriptionRepo) BatchUpdateExpiredStatus(ctx context.Context) (int64, error) { + return 0, errors.New("not implemented") +} + type googleErrorResponse struct { Error struct { Code int `json:"code"` @@ -505,3 +593,85 @@ func TestApiKeyAuthWithSubscriptionGoogle_TouchesLastUsedInStandardMode(t *testi require.Equal(t, http.StatusOK, rec.Code) require.Equal(t, 1, touchCalls) } + +func TestApiKeyAuthWithSubscriptionGoogle_SubscriptionLimitExceededReturns429(t *testing.T) { + gin.SetMode(gin.TestMode) + + limit := 1.0 + group := &service.Group{ + ID: 77, + Name: "gemini-sub", + Status: service.StatusActive, + Platform: service.PlatformGemini, + Hydrated: true, + SubscriptionType: service.SubscriptionTypeSubscription, + DailyLimitUSD: &limit, + } + user := &service.User{ + ID: 999, + Role: service.RoleUser, + Status: service.StatusActive, + Balance: 10, + Concurrency: 3, + } + apiKey := &service.APIKey{ + ID: 501, + UserID: user.ID, + Key: "google-sub-limit", + Status: service.StatusActive, + User: user, + Group: group, + } + apiKey.GroupID = &group.ID + + apiKeyService := newTestAPIKeyService(fakeAPIKeyRepo{ + getByKey: func(ctx context.Context, key string) (*service.APIKey, error) { + if key != apiKey.Key { + return nil, service.ErrAPIKeyNotFound + } + clone := *apiKey + return &clone, nil + }, + }) + + now := time.Now() + sub := &service.UserSubscription{ + ID: 601, + UserID: user.ID, + GroupID: group.ID, + Status: service.SubscriptionStatusActive, + ExpiresAt: now.Add(24 * 
time.Hour), + DailyWindowStart: &now, + DailyUsageUSD: 10, + } + subscriptionService := service.NewSubscriptionService(nil, fakeGoogleSubscriptionRepo{ + getActive: func(ctx context.Context, userID, groupID int64) (*service.UserSubscription, error) { + if userID != user.ID || groupID != group.ID { + return nil, service.ErrSubscriptionNotFound + } + clone := *sub + return &clone, nil + }, + updateStatus: func(ctx context.Context, subscriptionID int64, status string) error { return nil }, + activateWindow: func(ctx context.Context, id int64, start time.Time) error { return nil }, + resetDaily: func(ctx context.Context, id int64, start time.Time) error { return nil }, + resetWeekly: func(ctx context.Context, id int64, start time.Time) error { return nil }, + resetMonthly: func(ctx context.Context, id int64, start time.Time) error { return nil }, + }, nil, nil, &config.Config{RunMode: config.RunModeStandard}) + + r := gin.New() + r.Use(APIKeyAuthWithSubscriptionGoogle(apiKeyService, subscriptionService, &config.Config{RunMode: config.RunModeStandard})) + r.GET("/v1beta/test", func(c *gin.Context) { c.JSON(200, gin.H{"ok": true}) }) + + req := httptest.NewRequest(http.MethodGet, "/v1beta/test", nil) + req.Header.Set("x-goog-api-key", apiKey.Key) + rec := httptest.NewRecorder() + r.ServeHTTP(rec, req) + + require.Equal(t, http.StatusTooManyRequests, rec.Code) + var resp googleErrorResponse + require.NoError(t, json.Unmarshal(rec.Body.Bytes(), &resp)) + require.Equal(t, http.StatusTooManyRequests, resp.Error.Code) + require.Equal(t, "RESOURCE_EXHAUSTED", resp.Error.Status) + require.Contains(t, resp.Error.Message, "daily usage limit exceeded") +} diff --git a/backend/internal/server/middleware/security_headers.go b/backend/internal/server/middleware/security_headers.go index 67b19c09b..f061db90a 100644 --- a/backend/internal/server/middleware/security_headers.go +++ b/backend/internal/server/middleware/security_headers.go @@ -54,6 +54,10 @@ func SecurityHeaders(cfg 
config.CSPConfig) gin.HandlerFunc { c.Header("X-Content-Type-Options", "nosniff") c.Header("X-Frame-Options", "DENY") c.Header("Referrer-Policy", "strict-origin-when-cross-origin") + if isAPIRoutePath(c) { + c.Next() + return + } if cfg.Enabled { // Generate nonce for this request @@ -73,6 +77,18 @@ func SecurityHeaders(cfg config.CSPConfig) gin.HandlerFunc { } } +func isAPIRoutePath(c *gin.Context) bool { + if c == nil || c.Request == nil || c.Request.URL == nil { + return false + } + path := c.Request.URL.Path + return strings.HasPrefix(path, "/v1/") || + strings.HasPrefix(path, "/v1beta/") || + strings.HasPrefix(path, "/antigravity/") || + strings.HasPrefix(path, "/sora/") || + strings.HasPrefix(path, "/responses") +} + // enhanceCSPPolicy ensures the CSP policy includes nonce support and Cloudflare Insights domain. // This allows the application to work correctly even if the config file has an older CSP policy. func enhanceCSPPolicy(policy string) string { diff --git a/backend/internal/server/middleware/security_headers_test.go b/backend/internal/server/middleware/security_headers_test.go index 43462b82c..5a7798255 100644 --- a/backend/internal/server/middleware/security_headers_test.go +++ b/backend/internal/server/middleware/security_headers_test.go @@ -131,6 +131,26 @@ func TestSecurityHeaders(t *testing.T) { assert.Contains(t, csp, CloudflareInsightsDomain) }) + t.Run("api_route_skips_csp_nonce_generation", func(t *testing.T) { + cfg := config.CSPConfig{ + Enabled: true, + Policy: "default-src 'self'; script-src 'self' __CSP_NONCE__", + } + middleware := SecurityHeaders(cfg) + + w := httptest.NewRecorder() + c, _ := gin.CreateTestContext(w) + c.Request = httptest.NewRequest(http.MethodPost, "/v1/messages", nil) + + middleware(c) + + assert.Equal(t, "nosniff", w.Header().Get("X-Content-Type-Options")) + assert.Equal(t, "DENY", w.Header().Get("X-Frame-Options")) + assert.Equal(t, "strict-origin-when-cross-origin", w.Header().Get("Referrer-Policy")) + 
assert.Empty(t, w.Header().Get("Content-Security-Policy")) + assert.Empty(t, GetNonceFromContext(c)) + }) + t.Run("csp_enabled_with_nonce_placeholder", func(t *testing.T) { cfg := config.CSPConfig{ Enabled: true, diff --git a/backend/internal/server/routes/admin.go b/backend/internal/server/routes/admin.go index 888ca7516..8b8cdda2b 100644 --- a/backend/internal/server/routes/admin.go +++ b/backend/internal/server/routes/admin.go @@ -234,6 +234,7 @@ func registerAccountRoutes(admin *gin.RouterGroup, h *handler.Handlers) { accounts.POST("/:id/clear-error", h.Admin.Account.ClearError) accounts.GET("/:id/usage", h.Admin.Account.GetUsage) accounts.GET("/:id/today-stats", h.Admin.Account.GetTodayStats) + accounts.POST("/today-stats/batch", h.Admin.Account.GetBatchTodayStats) accounts.POST("/:id/clear-rate-limit", h.Admin.Account.ClearRateLimit) accounts.GET("/:id/temp-unschedulable", h.Admin.Account.GetTempUnschedulable) accounts.DELETE("/:id/temp-unschedulable", h.Admin.Account.ClearTempUnschedulable) diff --git a/backend/internal/service/account_usage_service.go b/backend/internal/service/account_usage_service.go index 35d369653..13a138567 100644 --- a/backend/internal/service/account_usage_service.go +++ b/backend/internal/service/account_usage_service.go @@ -9,7 +9,9 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/pkg/pagination" + "github.com/Wei-Shaw/sub2api/internal/pkg/timezone" "github.com/Wei-Shaw/sub2api/internal/pkg/usagestats" + "golang.org/x/sync/errgroup" ) type UsageLogRepository interface { @@ -62,6 +64,10 @@ type UsageLogRepository interface { GetDailyStatsAggregated(ctx context.Context, userID int64, startTime, endTime time.Time) ([]map[string]any, error) } +type accountWindowStatsBatchReader interface { + GetAccountWindowStatsBatch(ctx context.Context, accountIDs []int64, startTime time.Time) (map[int64]*usagestats.AccountStats, error) +} + // apiUsageCache 缓存从 Anthropic API 获取的使用率数据(utilization, resets_at) type apiUsageCache struct { 
response *ClaudeUsageResponse @@ -440,6 +446,78 @@ func (s *AccountUsageService) GetTodayStats(ctx context.Context, accountID int64 }, nil } +// GetTodayStatsBatch 批量获取账号今日统计,优先走批量 SQL,失败时回退单账号查询。 +func (s *AccountUsageService) GetTodayStatsBatch(ctx context.Context, accountIDs []int64) (map[int64]*WindowStats, error) { + uniqueIDs := make([]int64, 0, len(accountIDs)) + seen := make(map[int64]struct{}, len(accountIDs)) + for _, accountID := range accountIDs { + if accountID <= 0 { + continue + } + if _, exists := seen[accountID]; exists { + continue + } + seen[accountID] = struct{}{} + uniqueIDs = append(uniqueIDs, accountID) + } + + result := make(map[int64]*WindowStats, len(uniqueIDs)) + if len(uniqueIDs) == 0 { + return result, nil + } + + startTime := timezone.Today() + if batchReader, ok := s.usageLogRepo.(accountWindowStatsBatchReader); ok { + statsByAccount, err := batchReader.GetAccountWindowStatsBatch(ctx, uniqueIDs, startTime) + if err == nil { + for _, accountID := range uniqueIDs { + result[accountID] = windowStatsFromAccountStats(statsByAccount[accountID]) + } + return result, nil + } + } + + var mu sync.Mutex + g, gctx := errgroup.WithContext(ctx) + g.SetLimit(8) + + for _, accountID := range uniqueIDs { + id := accountID + g.Go(func() error { + stats, err := s.usageLogRepo.GetAccountWindowStats(gctx, id, startTime) + if err != nil { + return nil + } + mu.Lock() + result[id] = windowStatsFromAccountStats(stats) + mu.Unlock() + return nil + }) + } + + _ = g.Wait() + + for _, accountID := range uniqueIDs { + if _, ok := result[accountID]; !ok { + result[accountID] = &WindowStats{} + } + } + return result, nil +} + +func windowStatsFromAccountStats(stats *usagestats.AccountStats) *WindowStats { + if stats == nil { + return &WindowStats{} + } + return &WindowStats{ + Requests: stats.Requests, + Tokens: stats.Tokens, + Cost: stats.Cost, + StandardCost: stats.StandardCost, + UserCost: stats.UserCost, + } +} + func (s *AccountUsageService) 
GetAccountUsageStats(ctx context.Context, accountID int64, startTime, endTime time.Time) (*usagestats.AccountUsageStatsResponse, error) { stats, err := s.usageLogRepo.GetAccountUsageStats(ctx, accountID, startTime, endTime) if err != nil { diff --git a/backend/internal/service/antigravity_gateway_service.go b/backend/internal/service/antigravity_gateway_service.go index 108ff9ab2..aaf322912 100644 --- a/backend/internal/service/antigravity_gateway_service.go +++ b/backend/internal/service/antigravity_gateway_service.go @@ -21,7 +21,6 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/pkg/antigravity" - "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" "github.com/Wei-Shaw/sub2api/internal/pkg/logger" "github.com/gin-gonic/gin" "github.com/google/uuid" @@ -2291,7 +2290,7 @@ func sleepAntigravityBackoffWithContext(ctx context.Context, attempt int) bool { // isSingleAccountRetry 检查 context 中是否设置了单账号退避重试标记 func isSingleAccountRetry(ctx context.Context) bool { - v, _ := ctx.Value(ctxkey.SingleAccountRetry).(bool) + v, _ := SingleAccountRetryFromContext(ctx) return v } diff --git a/backend/internal/service/api_key.go b/backend/internal/service/api_key.go index fe1b3a5d5..07523597c 100644 --- a/backend/internal/service/api_key.go +++ b/backend/internal/service/api_key.go @@ -1,6 +1,10 @@ package service -import "time" +import ( + "time" + + "github.com/Wei-Shaw/sub2api/internal/pkg/ip" +) // API Key status constants const ( @@ -19,11 +23,14 @@ type APIKey struct { Status string IPWhitelist []string IPBlacklist []string - LastUsedAt *time.Time - CreatedAt time.Time - UpdatedAt time.Time - User *User - Group *Group + // 预编译的 IP 规则,用于认证热路径避免重复 ParseIP/ParseCIDR。 + CompiledIPWhitelist *ip.CompiledIPRules `json:"-"` + CompiledIPBlacklist *ip.CompiledIPRules `json:"-"` + LastUsedAt *time.Time + CreatedAt time.Time + UpdatedAt time.Time + User *User + Group *Group // Quota fields Quota float64 // Quota limit in USD (0 = unlimited) diff --git 
a/backend/internal/service/api_key_auth_cache_impl.go b/backend/internal/service/api_key_auth_cache_impl.go index 77a756742..30eb8d741 100644 --- a/backend/internal/service/api_key_auth_cache_impl.go +++ b/backend/internal/service/api_key_auth_cache_impl.go @@ -298,5 +298,6 @@ func (s *APIKeyService) snapshotToAPIKey(key string, snapshot *APIKeyAuthSnapsho SupportedModelScopes: snapshot.Group.SupportedModelScopes, } } + s.compileAPIKeyIPRules(apiKey) return apiKey } diff --git a/backend/internal/service/api_key_service.go b/backend/internal/service/api_key_service.go index c5e1cfab9..0d073077a 100644 --- a/backend/internal/service/api_key_service.go +++ b/backend/internal/service/api_key_service.go @@ -158,6 +158,14 @@ func NewAPIKeyService( return svc } +func (s *APIKeyService) compileAPIKeyIPRules(apiKey *APIKey) { + if apiKey == nil { + return + } + apiKey.CompiledIPWhitelist = ip.CompileIPRules(apiKey.IPWhitelist) + apiKey.CompiledIPBlacklist = ip.CompileIPRules(apiKey.IPBlacklist) +} + // GenerateKey 生成随机API Key func (s *APIKeyService) GenerateKey() (string, error) { // 生成32字节随机数据 @@ -332,6 +340,7 @@ func (s *APIKeyService) Create(ctx context.Context, userID int64, req CreateAPIK } s.InvalidateAuthCacheByKey(ctx, apiKey.Key) + s.compileAPIKeyIPRules(apiKey) return apiKey, nil } @@ -363,6 +372,7 @@ func (s *APIKeyService) GetByID(ctx context.Context, id int64) (*APIKey, error) if err != nil { return nil, fmt.Errorf("get api key: %w", err) } + s.compileAPIKeyIPRules(apiKey) return apiKey, nil } @@ -375,6 +385,7 @@ func (s *APIKeyService) GetByKey(ctx context.Context, key string) (*APIKey, erro if err != nil { return nil, fmt.Errorf("get api key: %w", err) } + s.compileAPIKeyIPRules(apiKey) return apiKey, nil } } @@ -391,6 +402,7 @@ func (s *APIKeyService) GetByKey(ctx context.Context, key string) (*APIKey, erro if err != nil { return nil, fmt.Errorf("get api key: %w", err) } + s.compileAPIKeyIPRules(apiKey) return apiKey, nil } } else { @@ -402,6 +414,7 @@ func 
(s *APIKeyService) GetByKey(ctx context.Context, key string) (*APIKey, erro if err != nil { return nil, fmt.Errorf("get api key: %w", err) } + s.compileAPIKeyIPRules(apiKey) return apiKey, nil } } @@ -411,6 +424,7 @@ func (s *APIKeyService) GetByKey(ctx context.Context, key string) (*APIKey, erro return nil, fmt.Errorf("get api key: %w", err) } apiKey.Key = key + s.compileAPIKeyIPRules(apiKey) return apiKey, nil } @@ -510,6 +524,7 @@ func (s *APIKeyService) Update(ctx context.Context, id int64, userID int64, req } s.InvalidateAuthCacheByKey(ctx, apiKey.Key) + s.compileAPIKeyIPRules(apiKey) return apiKey, nil } diff --git a/backend/internal/service/billing_cache_service.go b/backend/internal/service/billing_cache_service.go index a560930bc..1ab0811fe 100644 --- a/backend/internal/service/billing_cache_service.go +++ b/backend/internal/service/billing_cache_service.go @@ -3,6 +3,7 @@ package service import ( "context" "fmt" + "strconv" "sync" "sync/atomic" "time" @@ -10,6 +11,7 @@ import ( "github.com/Wei-Shaw/sub2api/internal/config" infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/Wei-Shaw/sub2api/internal/pkg/logger" + "golang.org/x/sync/singleflight" ) // 错误定义 @@ -58,6 +60,7 @@ const ( cacheWriteBufferSize = 1000 // 任务队列缓冲大小 cacheWriteTimeout = 2 * time.Second // 单个写入操作超时 cacheWriteDropLogInterval = 5 * time.Second // 丢弃日志节流间隔 + balanceLoadTimeout = 3 * time.Second ) // cacheWriteTask 缓存写入任务 @@ -82,6 +85,9 @@ type BillingCacheService struct { cacheWriteChan chan cacheWriteTask cacheWriteWg sync.WaitGroup cacheWriteStopOnce sync.Once + cacheWriteMu sync.RWMutex + stopped atomic.Bool + balanceLoadSF singleflight.Group // 丢弃日志节流计数器(减少高负载下日志噪音) cacheWriteDropFullCount uint64 cacheWriteDropFullLastLog int64 @@ -105,35 +111,52 @@ func NewBillingCacheService(cache BillingCache, userRepo UserRepository, subRepo // Stop 关闭缓存写入工作池 func (s *BillingCacheService) Stop() { s.cacheWriteStopOnce.Do(func() { - if s.cacheWriteChan == nil { + 
s.stopped.Store(true) + + s.cacheWriteMu.Lock() + ch := s.cacheWriteChan + if ch != nil { + close(ch) + } + s.cacheWriteMu.Unlock() + + if ch == nil { return } - close(s.cacheWriteChan) s.cacheWriteWg.Wait() - s.cacheWriteChan = nil + + s.cacheWriteMu.Lock() + if s.cacheWriteChan == ch { + s.cacheWriteChan = nil + } + s.cacheWriteMu.Unlock() }) } func (s *BillingCacheService) startCacheWriteWorkers() { - s.cacheWriteChan = make(chan cacheWriteTask, cacheWriteBufferSize) + ch := make(chan cacheWriteTask, cacheWriteBufferSize) + s.cacheWriteChan = ch for i := 0; i < cacheWriteWorkerCount; i++ { s.cacheWriteWg.Add(1) - go s.cacheWriteWorker() + go s.cacheWriteWorker(ch) } } // enqueueCacheWrite 尝试将任务入队,队列满时返回 false(并记录告警)。 func (s *BillingCacheService) enqueueCacheWrite(task cacheWriteTask) (enqueued bool) { + if s.stopped.Load() { + s.logCacheWriteDrop(task, "closed") + return false + } + + s.cacheWriteMu.RLock() + defer s.cacheWriteMu.RUnlock() + if s.cacheWriteChan == nil { + s.logCacheWriteDrop(task, "closed") return false } - defer func() { - if recovered := recover(); recovered != nil { - // 队列已关闭时可能触发 panic,记录后静默失败。 - s.logCacheWriteDrop(task, "closed") - enqueued = false - } - }() + select { case s.cacheWriteChan <- task: return true @@ -144,9 +167,9 @@ func (s *BillingCacheService) enqueueCacheWrite(task cacheWriteTask) (enqueued b } } -func (s *BillingCacheService) cacheWriteWorker() { +func (s *BillingCacheService) cacheWriteWorker(ch <-chan cacheWriteTask) { defer s.cacheWriteWg.Done() - for task := range s.cacheWriteChan { + for task := range ch { ctx, cancel := context.WithTimeout(context.Background(), cacheWriteTimeout) switch task.kind { case cacheWriteSetBalance: @@ -243,20 +266,28 @@ func (s *BillingCacheService) GetUserBalance(ctx context.Context, userID int64) return balance, nil } - // 缓存未命中,从数据库读取 - balance, err = s.getUserBalanceFromDB(ctx, userID) + // 缓存未命中:singleflight 合并同一 userID 的并发回源请求。 + value, err, _ := 
s.balanceLoadSF.Do(strconv.FormatInt(userID, 10), func() (any, error) { + loadCtx, cancel := context.WithTimeout(context.Background(), balanceLoadTimeout) + defer cancel() + + balance, err := s.getUserBalanceFromDB(loadCtx, userID) + if err != nil { + return nil, err + } + + // 异步建立缓存 + _ = s.enqueueCacheWrite(cacheWriteTask{ + kind: cacheWriteSetBalance, + userID: userID, + balance: balance, + }) + return balance, nil + }) if err != nil { return 0, err } - - // 异步建立缓存 - _ = s.enqueueCacheWrite(cacheWriteTask{ - kind: cacheWriteSetBalance, - userID: userID, - balance: balance, - }) - - return balance, nil + return value.(float64), nil } // getUserBalanceFromDB 从数据库获取用户余额 diff --git a/backend/internal/service/billing_cache_service_singleflight_test.go b/backend/internal/service/billing_cache_service_singleflight_test.go new file mode 100644 index 000000000..1b12c4029 --- /dev/null +++ b/backend/internal/service/billing_cache_service_singleflight_test.go @@ -0,0 +1,115 @@ +//go:build unit + +package service + +import ( + "context" + "errors" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/stretchr/testify/require" +) + +type billingCacheMissStub struct { + setBalanceCalls atomic.Int64 +} + +func (s *billingCacheMissStub) GetUserBalance(ctx context.Context, userID int64) (float64, error) { + return 0, errors.New("cache miss") +} + +func (s *billingCacheMissStub) SetUserBalance(ctx context.Context, userID int64, balance float64) error { + s.setBalanceCalls.Add(1) + return nil +} + +func (s *billingCacheMissStub) DeductUserBalance(ctx context.Context, userID int64, amount float64) error { + return nil +} + +func (s *billingCacheMissStub) InvalidateUserBalance(ctx context.Context, userID int64) error { + return nil +} + +func (s *billingCacheMissStub) GetSubscriptionCache(ctx context.Context, userID, groupID int64) (*SubscriptionCacheData, error) { + return nil, errors.New("cache miss") +} + +func (s 
*billingCacheMissStub) SetSubscriptionCache(ctx context.Context, userID, groupID int64, data *SubscriptionCacheData) error { + return nil +} + +func (s *billingCacheMissStub) UpdateSubscriptionUsage(ctx context.Context, userID, groupID int64, cost float64) error { + return nil +} + +func (s *billingCacheMissStub) InvalidateSubscriptionCache(ctx context.Context, userID, groupID int64) error { + return nil +} + +type balanceLoadUserRepoStub struct { + mockUserRepo + calls atomic.Int64 + delay time.Duration + balance float64 +} + +func (s *balanceLoadUserRepoStub) GetByID(ctx context.Context, id int64) (*User, error) { + s.calls.Add(1) + if s.delay > 0 { + select { + case <-time.After(s.delay): + case <-ctx.Done(): + return nil, ctx.Err() + } + } + return &User{ID: id, Balance: s.balance}, nil +} + +func TestBillingCacheServiceGetUserBalance_Singleflight(t *testing.T) { + cache := &billingCacheMissStub{} + userRepo := &balanceLoadUserRepoStub{ + delay: 80 * time.Millisecond, + balance: 12.34, + } + svc := NewBillingCacheService(cache, userRepo, nil, &config.Config{}) + t.Cleanup(svc.Stop) + + const goroutines = 16 + start := make(chan struct{}) + var wg sync.WaitGroup + errCh := make(chan error, goroutines) + balCh := make(chan float64, goroutines) + + for i := 0; i < goroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + <-start + bal, err := svc.GetUserBalance(context.Background(), 99) + errCh <- err + balCh <- bal + }() + } + + close(start) + wg.Wait() + close(errCh) + close(balCh) + + for err := range errCh { + require.NoError(t, err) + } + for bal := range balCh { + require.Equal(t, 12.34, bal) + } + + require.Equal(t, int64(1), userRepo.calls.Load(), "并发穿透应被 singleflight 合并") + require.Eventually(t, func() bool { + return cache.setBalanceCalls.Load() >= 1 + }, time.Second, 10*time.Millisecond) +} diff --git a/backend/internal/service/billing_cache_service_test.go b/backend/internal/service/billing_cache_service_test.go index 445d5319a..4e5f50e2e 100644 
--- a/backend/internal/service/billing_cache_service_test.go +++ b/backend/internal/service/billing_cache_service_test.go @@ -73,3 +73,16 @@ func TestBillingCacheServiceQueueHighLoad(t *testing.T) { return atomic.LoadInt64(&cache.subscriptionUpdates) > 0 }, 2*time.Second, 10*time.Millisecond) } + +func TestBillingCacheServiceEnqueueAfterStopReturnsFalse(t *testing.T) { + cache := &billingCacheWorkerStub{} + svc := NewBillingCacheService(cache, nil, nil, &config.Config{}) + svc.Stop() + + enqueued := svc.enqueueCacheWrite(cacheWriteTask{ + kind: cacheWriteDeductBalance, + userID: 1, + amount: 1, + }) + require.False(t, enqueued) +} diff --git a/backend/internal/service/claude_code_validator.go b/backend/internal/service/claude_code_validator.go index 6d06c83ed..d3a4d119b 100644 --- a/backend/internal/service/claude_code_validator.go +++ b/backend/internal/service/claude_code_validator.go @@ -78,7 +78,7 @@ func (v *ClaudeCodeValidator) Validate(r *http.Request, body map[string]any) boo // Step 3: 检查 max_tokens=1 + haiku 探测请求绕过 // 这类请求用于 Claude Code 验证 API 连通性,不携带 system prompt - if isMaxTokensOneHaiku, ok := r.Context().Value(ctxkey.IsMaxTokensOneHaikuRequest).(bool); ok && isMaxTokensOneHaiku { + if isMaxTokensOneHaiku, ok := IsMaxTokensOneHaikuRequestFromContext(r.Context()); ok && isMaxTokensOneHaiku { return true // 绕过 system prompt 检查,UA 已在 Step 1 验证 } diff --git a/backend/internal/service/concurrency_service.go b/backend/internal/service/concurrency_service.go index 32b6d97cd..4dcf84e0a 100644 --- a/backend/internal/service/concurrency_service.go +++ b/backend/internal/service/concurrency_service.go @@ -3,8 +3,10 @@ package service import ( "context" "crypto/rand" - "encoding/hex" - "fmt" + "encoding/binary" + "os" + "strconv" + "sync/atomic" "time" "github.com/Wei-Shaw/sub2api/internal/pkg/logger" @@ -18,6 +20,7 @@ type ConcurrencyCache interface { AcquireAccountSlot(ctx context.Context, accountID int64, maxConcurrency int, requestID string) (bool, error) 
ReleaseAccountSlot(ctx context.Context, accountID int64, requestID string) error GetAccountConcurrency(ctx context.Context, accountID int64) (int, error) + GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) // 账号等待队列(账号级) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) @@ -42,15 +45,25 @@ type ConcurrencyCache interface { CleanupExpiredAccountSlots(ctx context.Context, accountID int64) error } -// generateRequestID generates a unique request ID for concurrency slot tracking -// Uses 8 random bytes (16 hex chars) for uniqueness -func generateRequestID() string { +var ( + requestIDPrefix = initRequestIDPrefix() + requestIDCounter atomic.Uint64 +) + +func initRequestIDPrefix() string { b := make([]byte, 8) - if _, err := rand.Read(b); err != nil { - // Fallback to nanosecond timestamp (extremely rare case) - return fmt.Sprintf("%x", time.Now().UnixNano()) + if _, err := rand.Read(b); err == nil { + return "r" + strconv.FormatUint(binary.BigEndian.Uint64(b), 36) } - return hex.EncodeToString(b) + fallback := uint64(time.Now().UnixNano()) ^ (uint64(os.Getpid()) << 16) + return "r" + strconv.FormatUint(fallback, 36) +} + +// generateRequestID generates a unique request ID for concurrency slot tracking. 
+// Format: {process_random_prefix}-{base36_counter} +func generateRequestID() string { + seq := requestIDCounter.Add(1) + return requestIDPrefix + "-" + strconv.FormatUint(seq, 36) } const ( @@ -321,16 +334,15 @@ func (s *ConcurrencyService) StartSlotCleanupWorker(accountRepo AccountRepositor // GetAccountConcurrencyBatch gets current concurrency counts for multiple accounts // Returns a map of accountID -> current concurrency count func (s *ConcurrencyService) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) { - result := make(map[int64]int) - - for _, accountID := range accountIDs { - count, err := s.cache.GetAccountConcurrency(ctx, accountID) - if err != nil { - // If key doesn't exist in Redis, count is 0 - count = 0 + if len(accountIDs) == 0 { + return map[int64]int{}, nil + } + if s.cache == nil { + result := make(map[int64]int, len(accountIDs)) + for _, accountID := range accountIDs { + result[accountID] = 0 } - result[accountID] = count + return result, nil } - - return result, nil + return s.cache.GetAccountConcurrencyBatch(ctx, accountIDs) } diff --git a/backend/internal/service/concurrency_service_test.go b/backend/internal/service/concurrency_service_test.go index 33ce4cb9c..9ba43d936 100644 --- a/backend/internal/service/concurrency_service_test.go +++ b/backend/internal/service/concurrency_service_test.go @@ -5,6 +5,8 @@ package service import ( "context" "errors" + "strconv" + "strings" "testing" "github.com/stretchr/testify/require" @@ -12,20 +14,20 @@ import ( // stubConcurrencyCacheForTest 用于并发服务单元测试的缓存桩 type stubConcurrencyCacheForTest struct { - acquireResult bool - acquireErr error - releaseErr error - concurrency int + acquireResult bool + acquireErr error + releaseErr error + concurrency int concurrencyErr error - waitAllowed bool - waitErr error - waitCount int - waitCountErr error - loadBatch map[int64]*AccountLoadInfo - loadBatchErr error + waitAllowed bool + waitErr error + waitCount int + 
waitCountErr error + loadBatch map[int64]*AccountLoadInfo + loadBatchErr error usersLoadBatch map[int64]*UserLoadInfo usersLoadErr error - cleanupErr error + cleanupErr error // 记录调用 releasedAccountIDs []int64 @@ -45,6 +47,16 @@ func (c *stubConcurrencyCacheForTest) ReleaseAccountSlot(_ context.Context, acco func (c *stubConcurrencyCacheForTest) GetAccountConcurrency(_ context.Context, _ int64) (int, error) { return c.concurrency, c.concurrencyErr } +func (c *stubConcurrencyCacheForTest) GetAccountConcurrencyBatch(_ context.Context, accountIDs []int64) (map[int64]int, error) { + result := make(map[int64]int, len(accountIDs)) + for _, accountID := range accountIDs { + if c.concurrencyErr != nil { + return nil, c.concurrencyErr + } + result[accountID] = c.concurrency + } + return result, nil +} func (c *stubConcurrencyCacheForTest) IncrementAccountWaitCount(_ context.Context, _ int64, _ int) (bool, error) { return c.waitAllowed, c.waitErr } @@ -155,6 +167,25 @@ func TestAcquireUserSlot_UnlimitedConcurrency(t *testing.T) { require.True(t, result.Acquired) } +func TestGenerateRequestID_UsesStablePrefixAndMonotonicCounter(t *testing.T) { + id1 := generateRequestID() + id2 := generateRequestID() + require.NotEmpty(t, id1) + require.NotEmpty(t, id2) + + p1 := strings.Split(id1, "-") + p2 := strings.Split(id2, "-") + require.Len(t, p1, 2) + require.Len(t, p2, 2) + require.Equal(t, p1[0], p2[0], "同一进程前缀应保持一致") + + n1, err := strconv.ParseUint(p1[1], 36, 64) + require.NoError(t, err) + n2, err := strconv.ParseUint(p2[1], 36, 64) + require.NoError(t, err) + require.Equal(t, n1+1, n2, "计数器应单调递增") +} + func TestGetAccountsLoadBatch_ReturnsCorrectData(t *testing.T) { expected := map[int64]*AccountLoadInfo{ 1: {AccountID: 1, CurrentConcurrency: 3, WaitingCount: 0, LoadRate: 60}, diff --git a/backend/internal/service/gateway_multiplatform_test.go b/backend/internal/service/gateway_multiplatform_test.go index 5055eec05..067a0e08d 100644 --- 
a/backend/internal/service/gateway_multiplatform_test.go +++ b/backend/internal/service/gateway_multiplatform_test.go @@ -1892,6 +1892,14 @@ func (m *mockConcurrencyCache) GetAccountConcurrency(ctx context.Context, accoun return 0, nil } +func (m *mockConcurrencyCache) GetAccountConcurrencyBatch(ctx context.Context, accountIDs []int64) (map[int64]int, error) { + result := make(map[int64]int, len(accountIDs)) + for _, accountID := range accountIDs { + result[accountID] = 0 + } + return result, nil +} + func (m *mockConcurrencyCache) IncrementAccountWaitCount(ctx context.Context, accountID int64, maxWait int) (bool, error) { return true, nil } diff --git a/backend/internal/service/gateway_service.go b/backend/internal/service/gateway_service.go index fb875f7f3..70b9fb9be 100644 --- a/backend/internal/service/gateway_service.go +++ b/backend/internal/service/gateway_service.go @@ -127,13 +127,26 @@ func WithForceCacheBilling(ctx context.Context) context.Context { } func (s *GatewayService) debugModelRoutingEnabled() bool { - v := strings.ToLower(strings.TrimSpace(os.Getenv("SUB2API_DEBUG_MODEL_ROUTING"))) - return v == "1" || v == "true" || v == "yes" || v == "on" + if s == nil { + return false + } + return s.debugModelRouting.Load() } func (s *GatewayService) debugClaudeMimicEnabled() bool { - v := strings.ToLower(strings.TrimSpace(os.Getenv("SUB2API_DEBUG_CLAUDE_MIMIC"))) - return v == "1" || v == "true" || v == "yes" || v == "on" + if s == nil { + return false + } + return s.debugClaudeMimic.Load() +} + +func parseDebugEnvBool(raw string) bool { + switch strings.ToLower(strings.TrimSpace(raw)) { + case "1", "true", "yes", "on": + return true + default: + return false + } } func shortSessionHash(sessionHash string) string { @@ -374,37 +387,16 @@ func modelsListCacheKey(groupID *int64, platform string) string { } func prefetchedStickyGroupIDFromContext(ctx context.Context) (int64, bool) { - if ctx == nil { - return 0, false - } - v := 
ctx.Value(ctxkey.PrefetchedStickyGroupID) - switch t := v.(type) { - case int64: - return t, true - case int: - return int64(t), true - } - return 0, false + return PrefetchedStickyGroupIDFromContext(ctx) } func prefetchedStickyAccountIDFromContext(ctx context.Context, groupID *int64) int64 { - if ctx == nil { - return 0 - } prefetchedGroupID, ok := prefetchedStickyGroupIDFromContext(ctx) if !ok || prefetchedGroupID != derefGroupID(groupID) { return 0 } - v := ctx.Value(ctxkey.PrefetchedStickyAccountID) - switch t := v.(type) { - case int64: - if t > 0 { - return t - } - case int: - if t > 0 { - return int64(t) - } + if accountID, ok := PrefetchedStickyAccountIDFromContext(ctx); ok && accountID > 0 { + return accountID } return 0 } @@ -509,29 +501,32 @@ func (s *GatewayService) TempUnscheduleRetryableError(ctx context.Context, accou // GatewayService handles API gateway operations type GatewayService struct { - accountRepo AccountRepository - groupRepo GroupRepository - usageLogRepo UsageLogRepository - userRepo UserRepository - userSubRepo UserSubscriptionRepository - userGroupRateRepo UserGroupRateRepository - cache GatewayCache - digestStore *DigestSessionStore - cfg *config.Config - schedulerSnapshot *SchedulerSnapshotService - billingService *BillingService - rateLimitService *RateLimitService - billingCacheService *BillingCacheService - identityService *IdentityService - httpUpstream HTTPUpstream - deferredService *DeferredService - concurrencyService *ConcurrencyService - claudeTokenProvider *ClaudeTokenProvider - sessionLimitCache SessionLimitCache // 会话数量限制缓存(仅 Anthropic OAuth/SetupToken) - userGroupRateCache *gocache.Cache - userGroupRateSF singleflight.Group - modelsListCache *gocache.Cache - modelsListCacheTTL time.Duration + accountRepo AccountRepository + groupRepo GroupRepository + usageLogRepo UsageLogRepository + userRepo UserRepository + userSubRepo UserSubscriptionRepository + userGroupRateRepo UserGroupRateRepository + cache GatewayCache + 
digestStore *DigestSessionStore + cfg *config.Config + schedulerSnapshot *SchedulerSnapshotService + billingService *BillingService + rateLimitService *RateLimitService + billingCacheService *BillingCacheService + identityService *IdentityService + httpUpstream HTTPUpstream + deferredService *DeferredService + concurrencyService *ConcurrencyService + claudeTokenProvider *ClaudeTokenProvider + sessionLimitCache SessionLimitCache // 会话数量限制缓存(仅 Anthropic OAuth/SetupToken) + userGroupRateCache *gocache.Cache + userGroupRateSF singleflight.Group + modelsListCache *gocache.Cache + modelsListCacheTTL time.Duration + responseHeaderFilter *responseheaders.CompiledHeaderFilter + debugModelRouting atomic.Bool + debugClaudeMimic atomic.Bool } // NewGatewayService creates a new GatewayService @@ -559,30 +554,34 @@ func NewGatewayService( userGroupRateTTL := resolveUserGroupRateCacheTTL(cfg) modelsListTTL := resolveModelsListCacheTTL(cfg) - return &GatewayService{ - accountRepo: accountRepo, - groupRepo: groupRepo, - usageLogRepo: usageLogRepo, - userRepo: userRepo, - userSubRepo: userSubRepo, - userGroupRateRepo: userGroupRateRepo, - cache: cache, - digestStore: digestStore, - cfg: cfg, - schedulerSnapshot: schedulerSnapshot, - concurrencyService: concurrencyService, - billingService: billingService, - rateLimitService: rateLimitService, - billingCacheService: billingCacheService, - identityService: identityService, - httpUpstream: httpUpstream, - deferredService: deferredService, - claudeTokenProvider: claudeTokenProvider, - sessionLimitCache: sessionLimitCache, - userGroupRateCache: gocache.New(userGroupRateTTL, time.Minute), - modelsListCache: gocache.New(modelsListTTL, time.Minute), - modelsListCacheTTL: modelsListTTL, - } + svc := &GatewayService{ + accountRepo: accountRepo, + groupRepo: groupRepo, + usageLogRepo: usageLogRepo, + userRepo: userRepo, + userSubRepo: userSubRepo, + userGroupRateRepo: userGroupRateRepo, + cache: cache, + digestStore: digestStore, + cfg: cfg, + 
schedulerSnapshot: schedulerSnapshot, + concurrencyService: concurrencyService, + billingService: billingService, + rateLimitService: rateLimitService, + billingCacheService: billingCacheService, + identityService: identityService, + httpUpstream: httpUpstream, + deferredService: deferredService, + claudeTokenProvider: claudeTokenProvider, + sessionLimitCache: sessionLimitCache, + userGroupRateCache: gocache.New(userGroupRateTTL, time.Minute), + modelsListCache: gocache.New(modelsListTTL, time.Minute), + modelsListCacheTTL: modelsListTTL, + responseHeaderFilter: compileResponseHeaderFilter(cfg), + } + svc.debugModelRouting.Store(parseDebugEnvBool(os.Getenv("SUB2API_DEBUG_MODEL_ROUTING"))) + svc.debugClaudeMimic.Store(parseDebugEnvBool(os.Getenv("SUB2API_DEBUG_CLAUDE_MIMIC"))) + return svc } // GenerateSessionHash 从预解析请求计算粘性会话 hash @@ -2801,7 +2800,7 @@ func (s *GatewayService) isModelSupportedByAccountWithContext(ctx context.Contex return false } // 应用 thinking 后缀后检查最终模型是否在账号映射中 - if enabled, ok := ctx.Value(ctxkey.ThinkingEnabled).(bool); ok { + if enabled, ok := ThinkingEnabledFromContext(ctx); ok { finalModel := applyThinkingModelSuffix(mapped, enabled) if finalModel == mapped { return true // thinking 后缀未改变模型名,映射已通过 @@ -4012,7 +4011,7 @@ func (s *GatewayService) handleStreamingResponseAnthropicAPIKeyPassthrough( s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) } - writeAnthropicPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.cfg) + writeAnthropicPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) if contentType == "" { @@ -4308,7 +4307,7 @@ func (s *GatewayService) handleNonStreamingResponseAnthropicAPIKeyPassthrough( usage := parseClaudeUsageFromResponseBody(body) - writeAnthropicPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.cfg) + writeAnthropicPassthroughResponseHeaders(c.Writer.Header(), resp.Header, 
s.responseHeaderFilter) contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) if contentType == "" { contentType = "application/json" @@ -4317,12 +4316,12 @@ func (s *GatewayService) handleNonStreamingResponseAnthropicAPIKeyPassthrough( return usage, nil } -func writeAnthropicPassthroughResponseHeaders(dst http.Header, src http.Header, cfg *config.Config) { +func writeAnthropicPassthroughResponseHeaders(dst http.Header, src http.Header, filter *responseheaders.CompiledHeaderFilter) { if dst == nil || src == nil { return } - if cfg != nil { - responseheaders.WriteFilteredHeaders(dst, src, cfg.Security.ResponseHeaders) + if filter != nil { + responseheaders.WriteFilteredHeaders(dst, src, filter) return } if v := strings.TrimSpace(src.Get("Content-Type")); v != "" { @@ -5007,8 +5006,8 @@ func (s *GatewayService) handleStreamingResponse(ctx context.Context, resp *http // 更新5h窗口状态 s.rateLimitService.UpdateSessionWindow(ctx, account, resp.Header) - if s.cfg != nil { - responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + if s.responseHeaderFilter != nil { + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) } // 设置SSE响应头 @@ -5598,7 +5597,7 @@ func (s *GatewayService) handleNonStreamingResponse(ctx context.Context, resp *h body = s.replaceModelInResponseBody(body, mappedModel, originalModel) } - responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) contentType := "application/json" if s.cfg != nil && !s.cfg.Security.ResponseHeaders.Enabled { @@ -6354,7 +6353,7 @@ func (s *GatewayService) forwardCountTokensAnthropicAPIKeyPassthrough(ctx contex return fmt.Errorf("upstream error: %d message=%s", resp.StatusCode, upstreamMsg) } - writeAnthropicPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.cfg) + 
writeAnthropicPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) contentType := strings.TrimSpace(resp.Header.Get("Content-Type")) if contentType == "" { contentType = "application/json" diff --git a/backend/internal/service/gemini_messages_compat_service.go b/backend/internal/service/gemini_messages_compat_service.go index b51744511..1d2f34239 100644 --- a/backend/internal/service/gemini_messages_compat_service.go +++ b/backend/internal/service/gemini_messages_compat_service.go @@ -53,6 +53,7 @@ type GeminiMessagesCompatService struct { httpUpstream HTTPUpstream antigravityGatewayService *AntigravityGatewayService cfg *config.Config + responseHeaderFilter *responseheaders.CompiledHeaderFilter } func NewGeminiMessagesCompatService( @@ -76,6 +77,7 @@ func NewGeminiMessagesCompatService( httpUpstream: httpUpstream, antigravityGatewayService: antigravityGatewayService, cfg: cfg, + responseHeaderFilter: compileResponseHeaderFilter(cfg), } } @@ -2429,7 +2431,7 @@ func (s *GeminiMessagesCompatService) handleNativeNonStreamingResponse(c *gin.Co } } - responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) contentType := resp.Header.Get("Content-Type") if contentType == "" { @@ -2454,8 +2456,8 @@ func (s *GeminiMessagesCompatService) handleNativeStreamingResponse(c *gin.Conte logger.LegacyPrintf("service.gemini_messages_compat", "[GeminiAPI] ====================================================") } - if s.cfg != nil { - responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + if s.responseHeaderFilter != nil { + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) } c.Status(resp.StatusCode) @@ -2596,7 +2598,7 @@ func (s *GeminiMessagesCompatService) ForwardAIStudioGET(ctx context.Context, ac body, _ := 
io.ReadAll(io.LimitReader(resp.Body, 8<<20)) wwwAuthenticate := resp.Header.Get("Www-Authenticate") - filteredHeaders := responseheaders.FilterHeaders(resp.Header, s.cfg.Security.ResponseHeaders) + filteredHeaders := responseheaders.FilterHeaders(resp.Header, s.responseHeaderFilter) if wwwAuthenticate != "" { filteredHeaders.Set("Www-Authenticate", wwwAuthenticate) } diff --git a/backend/internal/service/model_rate_limit.go b/backend/internal/service/model_rate_limit.go index ff4b5977f..c45615cc4 100644 --- a/backend/internal/service/model_rate_limit.go +++ b/backend/internal/service/model_rate_limit.go @@ -4,8 +4,6 @@ import ( "context" "strings" "time" - - "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" ) const modelRateLimitsKey = "model_rate_limits" @@ -73,7 +71,7 @@ func resolveFinalAntigravityModelKey(ctx context.Context, account *Account, requ return "" } // thinking 会影响 Antigravity 最终模型名(例如 claude-sonnet-4-5 -> claude-sonnet-4-5-thinking) - if enabled, ok := ctx.Value(ctxkey.ThinkingEnabled).(bool); ok { + if enabled, ok := ThinkingEnabledFromContext(ctx); ok { modelKey = applyThinkingModelSuffix(modelKey, enabled) } return modelKey diff --git a/backend/internal/service/openai_account_scheduler.go b/backend/internal/service/openai_account_scheduler.go index bb7daf099..fade0558c 100644 --- a/backend/internal/service/openai_account_scheduler.go +++ b/backend/internal/service/openai_account_scheduler.go @@ -21,6 +21,7 @@ const ( type OpenAIAccountScheduleRequest struct { GroupID *int64 SessionHash string + StickyAccountID int64 PreviousResponseID string RequestedModel string ExcludedIDs map[int64]struct{} @@ -288,9 +289,15 @@ func (s *defaultOpenAIAccountScheduler) selectBySessionHash( return nil, nil } - cacheKey := "openai:" + sessionHash - accountID, err := s.service.cache.GetSessionAccountID(ctx, derefGroupID(req.GroupID), cacheKey) - if err != nil || accountID <= 0 { + accountID := req.StickyAccountID + if accountID <= 0 { + var err error + accountID, 
err = s.service.getStickySessionAccountID(ctx, req.GroupID, sessionHash) + if err != nil || accountID <= 0 { + return nil, nil + } + } + if accountID <= 0 { return nil, nil } if req.ExcludedIDs != nil { @@ -301,11 +308,11 @@ func (s *defaultOpenAIAccountScheduler) selectBySessionHash( account, err := s.service.getSchedulableAccount(ctx, accountID) if err != nil || account == nil { - _ = s.service.cache.DeleteSessionAccountID(ctx, derefGroupID(req.GroupID), cacheKey) + _ = s.service.deleteStickySessionAccountID(ctx, req.GroupID, sessionHash) return nil, nil } if shouldClearStickySession(account, req.RequestedModel) || !account.IsOpenAI() { - _ = s.service.cache.DeleteSessionAccountID(ctx, derefGroupID(req.GroupID), cacheKey) + _ = s.service.deleteStickySessionAccountID(ctx, req.GroupID, sessionHash) return nil, nil } if req.RequestedModel != "" && !account.IsModelSupported(req.RequestedModel) { @@ -314,12 +321,7 @@ func (s *defaultOpenAIAccountScheduler) selectBySessionHash( result, acquireErr := s.service.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) if acquireErr == nil && result.Acquired { - _ = s.service.cache.RefreshSessionTTL( - ctx, - derefGroupID(req.GroupID), - cacheKey, - s.service.openAIWSSessionStickyTTL(), - ) + _ = s.service.refreshStickySessionTTL(ctx, req.GroupID, sessionHash, s.service.openAIWSSessionStickyTTL()) return &AccountSelectionResult{ Account: account, Acquired: true, @@ -660,9 +662,18 @@ func (s *OpenAIGatewayService) SelectAccountWithScheduler( decision.Layer = openAIAccountScheduleLayerLoadBalance return selection, decision, err } + + var stickyAccountID int64 + if sessionHash != "" && s.cache != nil { + if accountID, err := s.getStickySessionAccountID(ctx, groupID, sessionHash); err == nil && accountID > 0 { + stickyAccountID = accountID + } + } + return scheduler.Select(ctx, OpenAIAccountScheduleRequest{ GroupID: groupID, SessionHash: sessionHash, + StickyAccountID: stickyAccountID, PreviousResponseID: previousResponseID, 
RequestedModel: requestedModel, ExcludedIDs: excludedIDs, diff --git a/backend/internal/service/openai_gateway_service.go b/backend/internal/service/openai_gateway_service.go index 7eb3ee002..5bef19ba1 100644 --- a/backend/internal/service/openai_gateway_service.go +++ b/backend/internal/service/openai_gateway_service.go @@ -221,6 +221,21 @@ type OpenAIWSRetryMetricsSnapshot struct { NonRetryableFastFallbackTotal int64 `json:"non_retryable_fast_fallback_total"` } +type OpenAICompatibilityFallbackMetricsSnapshot struct { + SessionHashLegacyReadFallbackTotal int64 `json:"session_hash_legacy_read_fallback_total"` + SessionHashLegacyReadFallbackHit int64 `json:"session_hash_legacy_read_fallback_hit"` + SessionHashLegacyDualWriteTotal int64 `json:"session_hash_legacy_dual_write_total"` + SessionHashLegacyReadHitRate float64 `json:"session_hash_legacy_read_hit_rate"` + + MetadataLegacyFallbackIsMaxTokensOneHaikuTotal int64 `json:"metadata_legacy_fallback_is_max_tokens_one_haiku_total"` + MetadataLegacyFallbackThinkingEnabledTotal int64 `json:"metadata_legacy_fallback_thinking_enabled_total"` + MetadataLegacyFallbackPrefetchedStickyAccount int64 `json:"metadata_legacy_fallback_prefetched_sticky_account_total"` + MetadataLegacyFallbackPrefetchedStickyGroup int64 `json:"metadata_legacy_fallback_prefetched_sticky_group_total"` + MetadataLegacyFallbackSingleAccountRetryTotal int64 `json:"metadata_legacy_fallback_single_account_retry_total"` + MetadataLegacyFallbackAccountSwitchCountTotal int64 `json:"metadata_legacy_fallback_account_switch_count_total"` + MetadataLegacyFallbackTotal int64 `json:"metadata_legacy_fallback_total"` +} + type openAIWSRetryMetrics struct { retryAttempts atomic.Int64 retryBackoffMs atomic.Int64 @@ -258,6 +273,7 @@ type OpenAIGatewayService struct { openaiWSFallbackUntil sync.Map // key: int64(accountID), value: time.Time openaiWSRetryMetrics openAIWSRetryMetrics + responseHeaderFilter *responseheaders.CompiledHeaderFilter } // 
NewOpenAIGatewayService creates a new OpenAIGatewayService @@ -278,23 +294,24 @@ func NewOpenAIGatewayService( openAITokenProvider *OpenAITokenProvider, ) *OpenAIGatewayService { svc := &OpenAIGatewayService{ - accountRepo: accountRepo, - usageLogRepo: usageLogRepo, - userRepo: userRepo, - userSubRepo: userSubRepo, - cache: cache, - cfg: cfg, - codexDetector: NewOpenAICodexClientRestrictionDetector(cfg), - schedulerSnapshot: schedulerSnapshot, - concurrencyService: concurrencyService, - billingService: billingService, - rateLimitService: rateLimitService, - billingCacheService: billingCacheService, - httpUpstream: httpUpstream, - deferredService: deferredService, - openAITokenProvider: openAITokenProvider, - toolCorrector: NewCodexToolCorrector(), - openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + accountRepo: accountRepo, + usageLogRepo: usageLogRepo, + userRepo: userRepo, + userSubRepo: userSubRepo, + cache: cache, + cfg: cfg, + codexDetector: NewOpenAICodexClientRestrictionDetector(cfg), + schedulerSnapshot: schedulerSnapshot, + concurrencyService: concurrencyService, + billingService: billingService, + rateLimitService: rateLimitService, + billingCacheService: billingCacheService, + httpUpstream: httpUpstream, + deferredService: deferredService, + openAITokenProvider: openAITokenProvider, + toolCorrector: NewCodexToolCorrector(), + openaiWSResolver: NewOpenAIWSProtocolResolver(cfg), + responseHeaderFilter: compileResponseHeaderFilter(cfg), } svc.logOpenAIWSModeBootstrap() return svc @@ -629,6 +646,32 @@ func (s *OpenAIGatewayService) SnapshotOpenAIWSRetryMetrics() OpenAIWSRetryMetri } } +func SnapshotOpenAICompatibilityFallbackMetrics() OpenAICompatibilityFallbackMetricsSnapshot { + legacyReadFallbackTotal, legacyReadFallbackHit, legacyDualWriteTotal := openAIStickyCompatStats() + isMaxTokensOneHaiku, thinkingEnabled, prefetchedStickyAccount, prefetchedStickyGroup, singleAccountRetry, accountSwitchCount := RequestMetadataFallbackStats() + + readHitRate := 
float64(0) + if legacyReadFallbackTotal > 0 { + readHitRate = float64(legacyReadFallbackHit) / float64(legacyReadFallbackTotal) + } + metadataFallbackTotal := isMaxTokensOneHaiku + thinkingEnabled + prefetchedStickyAccount + prefetchedStickyGroup + singleAccountRetry + accountSwitchCount + + return OpenAICompatibilityFallbackMetricsSnapshot{ + SessionHashLegacyReadFallbackTotal: legacyReadFallbackTotal, + SessionHashLegacyReadFallbackHit: legacyReadFallbackHit, + SessionHashLegacyDualWriteTotal: legacyDualWriteTotal, + SessionHashLegacyReadHitRate: readHitRate, + + MetadataLegacyFallbackIsMaxTokensOneHaikuTotal: isMaxTokensOneHaiku, + MetadataLegacyFallbackThinkingEnabledTotal: thinkingEnabled, + MetadataLegacyFallbackPrefetchedStickyAccount: prefetchedStickyAccount, + MetadataLegacyFallbackPrefetchedStickyGroup: prefetchedStickyGroup, + MetadataLegacyFallbackSingleAccountRetryTotal: singleAccountRetry, + MetadataLegacyFallbackAccountSwitchCountTotal: accountSwitchCount, + MetadataLegacyFallbackTotal: metadataFallbackTotal, + } +} + func (s *OpenAIGatewayService) detectCodexClientRestriction(c *gin.Context, account *Account) CodexClientRestrictionDetectionResult { return s.getCodexClientRestrictionDetector().Detect(c, account) } @@ -855,8 +898,9 @@ func (s *OpenAIGatewayService) GenerateSessionHash(c *gin.Context, body []byte) return "" } - hash := sha256.Sum256([]byte(sessionID)) - return hex.EncodeToString(hash[:]) + currentHash, legacyHash := deriveOpenAISessionHashes(sessionID) + attachOpenAILegacySessionHashToGin(c, legacyHash) + return currentHash } // BindStickySession sets session -> account binding with standard TTL. 
@@ -868,7 +912,7 @@ func (s *OpenAIGatewayService) BindStickySession(ctx context.Context, groupID *i if s != nil && s.cfg != nil && s.cfg.Gateway.OpenAIWS.StickySessionTTLSeconds > 0 { ttl = time.Duration(s.cfg.Gateway.OpenAIWS.StickySessionTTLSeconds) * time.Second } - return s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash, accountID, ttl) + return s.setStickySessionAccountID(ctx, groupID, sessionHash, accountID, ttl) } // SelectAccount selects an OpenAI account with sticky session support @@ -884,11 +928,13 @@ func (s *OpenAIGatewayService) SelectAccountForModel(ctx context.Context, groupI // SelectAccountForModelWithExclusions selects an account supporting the requested model while excluding specified accounts. // SelectAccountForModelWithExclusions 选择支持指定模型的账号,同时排除指定的账号。 func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}) (*Account, error) { - cacheKey := "openai:" + sessionHash + return s.selectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs, 0) +} +func (s *OpenAIGatewayService) selectAccountForModelWithExclusions(ctx context.Context, groupID *int64, sessionHash string, requestedModel string, excludedIDs map[int64]struct{}, stickyAccountID int64) (*Account, error) { // 1. 尝试粘性会话命中 // Try sticky session hit - if account := s.tryStickySessionHit(ctx, groupID, sessionHash, cacheKey, requestedModel, excludedIDs); account != nil { + if account := s.tryStickySessionHit(ctx, groupID, sessionHash, requestedModel, excludedIDs, stickyAccountID); account != nil { return account, nil } @@ -913,7 +959,7 @@ func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.C // 4. 
设置粘性会话绑定 // Set sticky session binding if sessionHash != "" { - _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), cacheKey, selected.ID, openaiStickySessionTTL) + _ = s.setStickySessionAccountID(ctx, groupID, sessionHash, selected.ID, openaiStickySessionTTL) } return selected, nil @@ -924,14 +970,18 @@ func (s *OpenAIGatewayService) SelectAccountForModelWithExclusions(ctx context.C // // tryStickySessionHit attempts to get account from sticky session. // Returns account if hit and usable; clears session and returns nil if account is unavailable. -func (s *OpenAIGatewayService) tryStickySessionHit(ctx context.Context, groupID *int64, sessionHash, cacheKey, requestedModel string, excludedIDs map[int64]struct{}) *Account { +func (s *OpenAIGatewayService) tryStickySessionHit(ctx context.Context, groupID *int64, sessionHash, requestedModel string, excludedIDs map[int64]struct{}, stickyAccountID int64) *Account { if sessionHash == "" { return nil } - accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), cacheKey) - if err != nil || accountID <= 0 { - return nil + accountID := stickyAccountID + if accountID <= 0 { + var err error + accountID, err = s.getStickySessionAccountID(ctx, groupID, sessionHash) + if err != nil || accountID <= 0 { + return nil + } } if _, excluded := excludedIDs[accountID]; excluded { @@ -946,7 +996,7 @@ func (s *OpenAIGatewayService) tryStickySessionHit(ctx context.Context, groupID // 检查账号是否需要清理粘性会话 // Check if sticky session should be cleared if shouldClearStickySession(account, requestedModel) { - _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), cacheKey) + _ = s.deleteStickySessionAccountID(ctx, groupID, sessionHash) return nil } @@ -961,7 +1011,7 @@ func (s *OpenAIGatewayService) tryStickySessionHit(ctx context.Context, groupID // 刷新会话 TTL 并返回账号 // Refresh session TTL and return account - _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), cacheKey, openaiStickySessionTTL) + _ = 
s.refreshStickySessionTTL(ctx, groupID, sessionHash, openaiStickySessionTTL) return account } @@ -1047,12 +1097,12 @@ func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Contex cfg := s.schedulingConfig() var stickyAccountID int64 if sessionHash != "" && s.cache != nil { - if accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash); err == nil { + if accountID, err := s.getStickySessionAccountID(ctx, groupID, sessionHash); err == nil { stickyAccountID = accountID } } if s.concurrencyService == nil || !cfg.LoadBatchEnabled { - account, err := s.SelectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs) + account, err := s.selectAccountForModelWithExclusions(ctx, groupID, sessionHash, requestedModel, excludedIDs, stickyAccountID) if err != nil { return nil, err } @@ -1107,19 +1157,19 @@ func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Contex // ============ Layer 1: Sticky session ============ if sessionHash != "" { - accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash) - if err == nil && accountID > 0 && !isExcluded(accountID) { + accountID := stickyAccountID + if accountID > 0 && !isExcluded(accountID) { account, err := s.getSchedulableAccount(ctx, accountID) if err == nil { clearSticky := shouldClearStickySession(account, requestedModel) if clearSticky { - _ = s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash) + _ = s.deleteStickySessionAccountID(ctx, groupID, sessionHash) } if !clearSticky && account.IsSchedulable() && account.IsOpenAI() && (requestedModel == "" || account.IsModelSupported(requestedModel)) { result, err := s.tryAcquireAccountSlot(ctx, accountID, account.Concurrency) if err == nil && result.Acquired { - _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), "openai:"+sessionHash, openaiStickySessionTTL) + _ = s.refreshStickySessionTTL(ctx, groupID, 
sessionHash, openaiStickySessionTTL) return &AccountSelectionResult{ Account: account, Acquired: true, @@ -1183,7 +1233,7 @@ func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Contex result, err := s.tryAcquireAccountSlot(ctx, acc.ID, acc.Concurrency) if err == nil && result.Acquired { if sessionHash != "" { - _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash, acc.ID, openaiStickySessionTTL) + _ = s.setStickySessionAccountID(ctx, groupID, sessionHash, acc.ID, openaiStickySessionTTL) } return &AccountSelectionResult{ Account: acc, @@ -1233,7 +1283,7 @@ func (s *OpenAIGatewayService) SelectAccountWithLoadAwareness(ctx context.Contex result, err := s.tryAcquireAccountSlot(ctx, item.account.ID, item.account.Concurrency) if err == nil && result.Acquired { if sessionHash != "" { - _ = s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), "openai:"+sessionHash, item.account.ID, openaiStickySessionTTL) + _ = s.setStickySessionAccountID(ctx, groupID, sessionHash, item.account.ID, openaiStickySessionTTL) } return &AccountSelectionResult{ Account: item.account, @@ -1430,12 +1480,61 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco // Track if body needs re-serialization bodyModified := false + // 单字段补丁快速路径:只要整个变更集最终可归约为同一路径的 set/delete,就避免全量 Marshal。 + patchDisabled := false + patchHasOp := false + patchDelete := false + patchPath := "" + var patchValue any + markPatchSet := func(path string, value any) { + if strings.TrimSpace(path) == "" { + patchDisabled = true + return + } + if patchDisabled { + return + } + if !patchHasOp { + patchHasOp = true + patchDelete = false + patchPath = path + patchValue = value + return + } + if patchDelete || patchPath != path { + patchDisabled = true + return + } + patchValue = value + } + markPatchDelete := func(path string) { + if strings.TrimSpace(path) == "" { + patchDisabled = true + return + } + if patchDisabled { + return + } + if !patchHasOp { + 
patchHasOp = true + patchDelete = true + patchPath = path + return + } + if !patchDelete || patchPath != path { + patchDisabled = true + } + } + disablePatch := func() { + patchDisabled = true + } // 非透传模式下,保持历史行为:非 Codex CLI 请求在 instructions 为空时注入默认指令。 if !isCodexCLI && isInstructionsEmpty(reqBody) { if instructions := strings.TrimSpace(GetOpenCodeInstructions()); instructions != "" { reqBody["instructions"] = instructions bodyModified = true + markPatchSet("instructions", instructions) } } @@ -1445,6 +1544,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco logger.LegacyPrintf("service.openai_gateway", "[OpenAI] Model mapping applied: %s -> %s (account: %s, isCodexCLI: %v)", reqModel, mappedModel, account.Name, isCodexCLI) reqBody["model"] = mappedModel bodyModified = true + markPatchSet("model", mappedModel) } // 针对所有 OpenAI 账号执行 Codex 模型名规范化,确保上游识别一致。 @@ -1456,6 +1556,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco reqBody["model"] = normalizedModel mappedModel = normalizedModel bodyModified = true + markPatchSet("model", normalizedModel) } } @@ -1464,6 +1565,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco if effort, ok := reasoning["effort"].(string); ok && effort == "minimal" { reasoning["effort"] = "none" bodyModified = true + markPatchSet("reasoning.effort", "none") logger.LegacyPrintf("service.openai_gateway", "[OpenAI] Normalized reasoning.effort: minimal -> none (account: %s)", account.Name) } } @@ -1472,6 +1574,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco codexResult := applyCodexOAuthTransform(reqBody, isCodexCLI) if codexResult.Modified { bodyModified = true + disablePatch() } if codexResult.NormalizedModel != "" { mappedModel = codexResult.NormalizedModel @@ -1491,22 +1594,27 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco if account.Type == AccountTypeAPIKey { 
delete(reqBody, "max_output_tokens") bodyModified = true + markPatchDelete("max_output_tokens") } case PlatformAnthropic: // For Anthropic (Claude), convert to max_tokens delete(reqBody, "max_output_tokens") + markPatchDelete("max_output_tokens") if _, hasMaxTokens := reqBody["max_tokens"]; !hasMaxTokens { reqBody["max_tokens"] = maxOutputTokens + disablePatch() } bodyModified = true case PlatformGemini: // For Gemini, remove (will be handled by Gemini-specific transform) delete(reqBody, "max_output_tokens") bodyModified = true + markPatchDelete("max_output_tokens") default: // For unknown platforms, remove to be safe delete(reqBody, "max_output_tokens") bodyModified = true + markPatchDelete("max_output_tokens") } } @@ -1515,6 +1623,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco if account.Type == AccountTypeAPIKey || account.Platform != PlatformOpenAI { delete(reqBody, "max_completion_tokens") bodyModified = true + markPatchDelete("max_completion_tokens") } } @@ -1524,6 +1633,7 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco if _, has := reqBody[unsupportedField]; has { delete(reqBody, unsupportedField) bodyModified = true + markPatchDelete(unsupportedField) } } } @@ -1534,15 +1644,30 @@ func (s *OpenAIGatewayService) Forward(ctx context.Context, c *gin.Context, acco if _, has := reqBody["previous_response_id"]; has { delete(reqBody, "previous_response_id") bodyModified = true + markPatchDelete("previous_response_id") } } // Re-serialize body only if modified if bodyModified { - var err error - body, err = json.Marshal(reqBody) - if err != nil { - return nil, fmt.Errorf("serialize request body: %w", err) + serializedByPatch := false + if !patchDisabled && patchHasOp { + var patchErr error + if patchDelete { + body, patchErr = sjson.DeleteBytes(body, patchPath) + } else { + body, patchErr = sjson.SetBytes(body, patchPath, patchValue) + } + if patchErr == nil { + serializedByPatch = true + } 
+ } + if !serializedByPatch { + var marshalErr error + body, marshalErr = json.Marshal(reqBody) + if marshalErr != nil { + return nil, fmt.Errorf("serialize request body: %w", marshalErr) + } } } @@ -2111,7 +2236,7 @@ func (s *OpenAIGatewayService) handleErrorResponsePassthrough( UpstreamResponseBody: upstreamDetail, }) - writeOpenAIPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.cfg) + writeOpenAIPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) contentType := resp.Header.Get("Content-Type") if contentType == "" { contentType = "application/json" @@ -2178,7 +2303,7 @@ func (s *OpenAIGatewayService) handleStreamingResponsePassthrough( account *Account, startTime time.Time, ) (*openaiStreamingResultPassthrough, error) { - writeOpenAIPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.cfg) + writeOpenAIPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) // SSE headers c.Header("Content-Type", "text/event-stream") @@ -2305,7 +2430,7 @@ func (s *OpenAIGatewayService) handleNonStreamingResponsePassthrough( usage = s.parseSSEUsageFromBody(string(body)) } - writeOpenAIPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.cfg) + writeOpenAIPassthroughResponseHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) contentType := resp.Header.Get("Content-Type") if contentType == "" { @@ -2315,12 +2440,12 @@ func (s *OpenAIGatewayService) handleNonStreamingResponsePassthrough( return usage, nil } -func writeOpenAIPassthroughResponseHeaders(dst http.Header, src http.Header, cfg *config.Config) { +func writeOpenAIPassthroughResponseHeaders(dst http.Header, src http.Header, filter *responseheaders.CompiledHeaderFilter) { if dst == nil || src == nil { return } - if cfg != nil { - responseheaders.WriteFilteredHeaders(dst, src, cfg.Security.ResponseHeaders) + if filter != nil { + responseheaders.WriteFilteredHeaders(dst, src, filter) } else { // 兜底:尽量保留最基础的 content-type if v := 
strings.TrimSpace(src.Get("Content-Type")); v != "" { @@ -2599,8 +2724,8 @@ type openaiStreamingResult struct { } func (s *OpenAIGatewayService) handleStreamingResponse(ctx context.Context, resp *http.Response, c *gin.Context, account *Account, startTime time.Time, originalModel, mappedModel string) (*openaiStreamingResult, error) { - if s.cfg != nil { - responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + if s.responseHeaderFilter != nil { + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) } // Set SSE response headers @@ -3022,7 +3147,7 @@ func (s *OpenAIGatewayService) handleNonStreamingResponse(ctx context.Context, r body = s.replaceModelInResponseBody(body, mappedModel, originalModel) } - responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) contentType := "application/json" if s.cfg != nil && !s.cfg.Security.ResponseHeaders.Enabled { @@ -3064,7 +3189,7 @@ func (s *OpenAIGatewayService) handleOAuthSSEToJSON(resp *http.Response, c *gin. 
body = []byte(bodyText) } - responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.cfg.Security.ResponseHeaders) + responseheaders.WriteFilteredHeaders(c.Writer.Header(), resp.Header, s.responseHeaderFilter) contentType := "application/json; charset=utf-8" if !ok { @@ -3625,6 +3750,9 @@ func getOpenAIRequestBodyMap(c *gin.Context, body []byte) (map[string]any, error if err := json.Unmarshal(body, &reqBody); err != nil { return nil, fmt.Errorf("parse request: %w", err) } + if c != nil { + c.Set(OpenAIParsedRequestBodyKey, reqBody) + } return reqBody, nil } diff --git a/backend/internal/service/openai_gateway_service_hotpath_test.go b/backend/internal/service/openai_gateway_service_hotpath_test.go index 6b11831f8..f73c06c5e 100644 --- a/backend/internal/service/openai_gateway_service_hotpath_test.go +++ b/backend/internal/service/openai_gateway_service_hotpath_test.go @@ -123,3 +123,19 @@ func TestGetOpenAIRequestBodyMap_ParseErrorWithoutCache(t *testing.T) { require.Error(t, err) require.Contains(t, err.Error(), "parse request") } + +func TestGetOpenAIRequestBodyMap_WriteBackContextCache(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + + got, err := getOpenAIRequestBodyMap(c, []byte(`{"model":"gpt-5","stream":true}`)) + require.NoError(t, err) + require.Equal(t, "gpt-5", got["model"]) + + cached, ok := c.Get(OpenAIParsedRequestBodyKey) + require.True(t, ok) + cachedMap, ok := cached.(map[string]any) + require.True(t, ok) + require.Equal(t, got, cachedMap) +} diff --git a/backend/internal/service/openai_gateway_service_test.go b/backend/internal/service/openai_gateway_service_test.go index 08958b445..f3d96d20c 100644 --- a/backend/internal/service/openai_gateway_service_test.go +++ b/backend/internal/service/openai_gateway_service_test.go @@ -4,9 +4,8 @@ import ( "bufio" "bytes" "context" - "crypto/sha256" - "encoding/hex" "errors" + "fmt" "io" "net/http" "net/http/httptest" @@ -15,6 
+14,7 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/cespare/xxhash/v2" "github.com/gin-gonic/gin" "github.com/stretchr/testify/require" ) @@ -168,7 +168,7 @@ func TestOpenAIGatewayService_GenerateSessionHash_Priority(t *testing.T) { } } -func TestOpenAIGatewayService_GenerateSessionHash_UsesSHA256(t *testing.T) { +func TestOpenAIGatewayService_GenerateSessionHash_UsesXXHash64(t *testing.T) { gin.SetMode(gin.TestMode) rec := httptest.NewRecorder() c, _ := gin.CreateTestContext(rec) @@ -178,11 +178,26 @@ func TestOpenAIGatewayService_GenerateSessionHash_UsesSHA256(t *testing.T) { svc := &OpenAIGatewayService{} got := svc.GenerateSessionHash(c, nil) - sum := sha256.Sum256([]byte("sess-fixed-value")) - want := hex.EncodeToString(sum[:]) + want := fmt.Sprintf("%016x", xxhash.Sum64String("sess-fixed-value")) require.Equal(t, want, got) } +func TestOpenAIGatewayService_GenerateSessionHash_AttachesLegacyHashToContext(t *testing.T) { + gin.SetMode(gin.TestMode) + rec := httptest.NewRecorder() + c, _ := gin.CreateTestContext(rec) + c.Request = httptest.NewRequest(http.MethodPost, "/openai/v1/responses", nil) + + c.Request.Header.Set("session_id", "sess-legacy-check") + svc := &OpenAIGatewayService{} + + sessionHash := svc.GenerateSessionHash(c, nil) + require.NotEmpty(t, sessionHash) + require.NotNil(t, c.Request) + require.NotNil(t, c.Request.Context()) + require.NotEmpty(t, openAILegacySessionHashFromContext(c.Request.Context())) +} + func (c stubConcurrencyCache) GetAccountWaitingCount(ctx context.Context, accountID int64) (int, error) { if c.waitCounts != nil { if count, ok := c.waitCounts[accountID]; ok { diff --git a/backend/internal/service/openai_sticky_compat.go b/backend/internal/service/openai_sticky_compat.go new file mode 100644 index 000000000..e897debc2 --- /dev/null +++ b/backend/internal/service/openai_sticky_compat.go @@ -0,0 +1,214 @@ +package service + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "fmt" + 
"strings" + "sync/atomic" + "time" + + "github.com/cespare/xxhash/v2" + "github.com/gin-gonic/gin" +) + +type openAILegacySessionHashContextKey struct{} + +var openAILegacySessionHashKey = openAILegacySessionHashContextKey{} + +var ( + openAIStickyLegacyReadFallbackTotal atomic.Int64 + openAIStickyLegacyReadFallbackHit atomic.Int64 + openAIStickyLegacyDualWriteTotal atomic.Int64 +) + +func openAIStickyCompatStats() (legacyReadFallbackTotal, legacyReadFallbackHit, legacyDualWriteTotal int64) { + return openAIStickyLegacyReadFallbackTotal.Load(), + openAIStickyLegacyReadFallbackHit.Load(), + openAIStickyLegacyDualWriteTotal.Load() +} + +func deriveOpenAISessionHashes(sessionID string) (currentHash string, legacyHash string) { + normalized := strings.TrimSpace(sessionID) + if normalized == "" { + return "", "" + } + + currentHash = fmt.Sprintf("%016x", xxhash.Sum64String(normalized)) + sum := sha256.Sum256([]byte(normalized)) + legacyHash = hex.EncodeToString(sum[:]) + return currentHash, legacyHash +} + +func withOpenAILegacySessionHash(ctx context.Context, legacyHash string) context.Context { + if ctx == nil { + return nil + } + trimmed := strings.TrimSpace(legacyHash) + if trimmed == "" { + return ctx + } + return context.WithValue(ctx, openAILegacySessionHashKey, trimmed) +} + +func openAILegacySessionHashFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + value, _ := ctx.Value(openAILegacySessionHashKey).(string) + return strings.TrimSpace(value) +} + +func attachOpenAILegacySessionHashToGin(c *gin.Context, legacyHash string) { + if c == nil || c.Request == nil { + return + } + c.Request = c.Request.WithContext(withOpenAILegacySessionHash(c.Request.Context(), legacyHash)) +} + +func (s *OpenAIGatewayService) openAISessionHashReadOldFallbackEnabled() bool { + if s == nil || s.cfg == nil { + return true + } + return s.cfg.Gateway.OpenAIWS.SessionHashReadOldFallback +} + +func (s *OpenAIGatewayService) openAISessionHashDualWriteOldEnabled() 
bool { + if s == nil || s.cfg == nil { + return true + } + return s.cfg.Gateway.OpenAIWS.SessionHashDualWriteOld +} + +func (s *OpenAIGatewayService) openAISessionCacheKey(sessionHash string) string { + normalized := strings.TrimSpace(sessionHash) + if normalized == "" { + return "" + } + return "openai:" + normalized +} + +func (s *OpenAIGatewayService) openAILegacySessionCacheKey(ctx context.Context, sessionHash string) string { + legacyHash := openAILegacySessionHashFromContext(ctx) + if legacyHash == "" { + return "" + } + legacyKey := "openai:" + legacyHash + if legacyKey == s.openAISessionCacheKey(sessionHash) { + return "" + } + return legacyKey +} + +func (s *OpenAIGatewayService) openAIStickyLegacyTTL(ttl time.Duration) time.Duration { + legacyTTL := ttl + if legacyTTL <= 0 { + legacyTTL = openaiStickySessionTTL + } + if legacyTTL > 10*time.Minute { + return 10 * time.Minute + } + return legacyTTL +} + +func (s *OpenAIGatewayService) getStickySessionAccountID(ctx context.Context, groupID *int64, sessionHash string) (int64, error) { + if s == nil || s.cache == nil { + return 0, nil + } + + primaryKey := s.openAISessionCacheKey(sessionHash) + if primaryKey == "" { + return 0, nil + } + + accountID, err := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), primaryKey) + if err == nil && accountID > 0 { + return accountID, nil + } + if !s.openAISessionHashReadOldFallbackEnabled() { + return accountID, err + } + + legacyKey := s.openAILegacySessionCacheKey(ctx, sessionHash) + if legacyKey == "" { + return accountID, err + } + + openAIStickyLegacyReadFallbackTotal.Add(1) + legacyAccountID, legacyErr := s.cache.GetSessionAccountID(ctx, derefGroupID(groupID), legacyKey) + if legacyErr == nil && legacyAccountID > 0 { + openAIStickyLegacyReadFallbackHit.Add(1) + return legacyAccountID, nil + } + return accountID, err +} + +func (s *OpenAIGatewayService) setStickySessionAccountID(ctx context.Context, groupID *int64, sessionHash string, accountID int64, ttl 
time.Duration) error { + if s == nil || s.cache == nil || accountID <= 0 { + return nil + } + primaryKey := s.openAISessionCacheKey(sessionHash) + if primaryKey == "" { + return nil + } + + if err := s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), primaryKey, accountID, ttl); err != nil { + return err + } + + if !s.openAISessionHashDualWriteOldEnabled() { + return nil + } + legacyKey := s.openAILegacySessionCacheKey(ctx, sessionHash) + if legacyKey == "" { + return nil + } + if err := s.cache.SetSessionAccountID(ctx, derefGroupID(groupID), legacyKey, accountID, s.openAIStickyLegacyTTL(ttl)); err != nil { + return err + } + openAIStickyLegacyDualWriteTotal.Add(1) + return nil +} + +func (s *OpenAIGatewayService) refreshStickySessionTTL(ctx context.Context, groupID *int64, sessionHash string, ttl time.Duration) error { + if s == nil || s.cache == nil { + return nil + } + primaryKey := s.openAISessionCacheKey(sessionHash) + if primaryKey == "" { + return nil + } + + err := s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), primaryKey, ttl) + if !s.openAISessionHashReadOldFallbackEnabled() && !s.openAISessionHashDualWriteOldEnabled() { + return err + } + + legacyKey := s.openAILegacySessionCacheKey(ctx, sessionHash) + if legacyKey != "" { + _ = s.cache.RefreshSessionTTL(ctx, derefGroupID(groupID), legacyKey, s.openAIStickyLegacyTTL(ttl)) + } + return err +} + +func (s *OpenAIGatewayService) deleteStickySessionAccountID(ctx context.Context, groupID *int64, sessionHash string) error { + if s == nil || s.cache == nil { + return nil + } + primaryKey := s.openAISessionCacheKey(sessionHash) + if primaryKey == "" { + return nil + } + + err := s.cache.DeleteSessionAccountID(ctx, derefGroupID(groupID), primaryKey) + if !s.openAISessionHashReadOldFallbackEnabled() && !s.openAISessionHashDualWriteOldEnabled() { + return err + } + + legacyKey := s.openAILegacySessionCacheKey(ctx, sessionHash) + if legacyKey != "" { + _ = s.cache.DeleteSessionAccountID(ctx, 
derefGroupID(groupID), legacyKey) + } + return err +} diff --git a/backend/internal/service/openai_sticky_compat_test.go b/backend/internal/service/openai_sticky_compat_test.go new file mode 100644 index 000000000..9f57c3580 --- /dev/null +++ b/backend/internal/service/openai_sticky_compat_test.go @@ -0,0 +1,96 @@ +package service + +import ( + "context" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/stretchr/testify/require" +) + +func TestGetStickySessionAccountID_FallbackToLegacyKey(t *testing.T) { + beforeFallbackTotal, beforeFallbackHit, _ := openAIStickyCompatStats() + + cache := &stubGatewayCache{ + sessionBindings: map[string]int64{ + "openai:legacy-hash": 42, + }, + } + svc := &OpenAIGatewayService{ + cache: cache, + cfg: &config.Config{ + Gateway: config.GatewayConfig{ + OpenAIWS: config.GatewayOpenAIWSConfig{ + SessionHashReadOldFallback: true, + }, + }, + }, + } + + ctx := withOpenAILegacySessionHash(context.Background(), "legacy-hash") + accountID, err := svc.getStickySessionAccountID(ctx, nil, "new-hash") + require.NoError(t, err) + require.Equal(t, int64(42), accountID) + + afterFallbackTotal, afterFallbackHit, _ := openAIStickyCompatStats() + require.Equal(t, beforeFallbackTotal+1, afterFallbackTotal) + require.Equal(t, beforeFallbackHit+1, afterFallbackHit) +} + +func TestSetStickySessionAccountID_DualWriteOldEnabled(t *testing.T) { + _, _, beforeDualWriteTotal := openAIStickyCompatStats() + + cache := &stubGatewayCache{sessionBindings: map[string]int64{}} + svc := &OpenAIGatewayService{ + cache: cache, + cfg: &config.Config{ + Gateway: config.GatewayConfig{ + OpenAIWS: config.GatewayOpenAIWSConfig{ + SessionHashDualWriteOld: true, + }, + }, + }, + } + + ctx := withOpenAILegacySessionHash(context.Background(), "legacy-hash") + err := svc.setStickySessionAccountID(ctx, nil, "new-hash", 9, openaiStickySessionTTL) + require.NoError(t, err) + require.Equal(t, int64(9), 
cache.sessionBindings["openai:new-hash"]) + require.Equal(t, int64(9), cache.sessionBindings["openai:legacy-hash"]) + + _, _, afterDualWriteTotal := openAIStickyCompatStats() + require.Equal(t, beforeDualWriteTotal+1, afterDualWriteTotal) +} + +func TestSetStickySessionAccountID_DualWriteOldDisabled(t *testing.T) { + cache := &stubGatewayCache{sessionBindings: map[string]int64{}} + svc := &OpenAIGatewayService{ + cache: cache, + cfg: &config.Config{ + Gateway: config.GatewayConfig{ + OpenAIWS: config.GatewayOpenAIWSConfig{ + SessionHashDualWriteOld: false, + }, + }, + }, + } + + ctx := withOpenAILegacySessionHash(context.Background(), "legacy-hash") + err := svc.setStickySessionAccountID(ctx, nil, "new-hash", 9, openaiStickySessionTTL) + require.NoError(t, err) + require.Equal(t, int64(9), cache.sessionBindings["openai:new-hash"]) + _, exists := cache.sessionBindings["openai:legacy-hash"] + require.False(t, exists) +} + +func TestSnapshotOpenAICompatibilityFallbackMetrics(t *testing.T) { + before := SnapshotOpenAICompatibilityFallbackMetrics() + + ctx := context.WithValue(context.Background(), ctxkey.ThinkingEnabled, true) + _, _ = ThinkingEnabledFromContext(ctx) + + after := SnapshotOpenAICompatibilityFallbackMetrics() + require.GreaterOrEqual(t, after.MetadataLegacyFallbackTotal, before.MetadataLegacyFallbackTotal+1) + require.GreaterOrEqual(t, after.MetadataLegacyFallbackThinkingEnabledTotal, before.MetadataLegacyFallbackThinkingEnabledTotal+1) +} diff --git a/backend/internal/service/openai_ws_forwarder.go b/backend/internal/service/openai_ws_forwarder.go index b496ac769..8f37a05ae 100644 --- a/backend/internal/service/openai_ws_forwarder.go +++ b/backend/internal/service/openai_ws_forwarder.go @@ -3,8 +3,6 @@ package service import ( "bytes" "context" - "crypto/sha256" - "encoding/hex" "encoding/json" "errors" "fmt" @@ -536,13 +534,29 @@ func openAIWSPayloadString(payload map[string]any, key string) string { } } -func openAIWSSessionHashFromID(sessionID 
string) string { - normalized := strings.TrimSpace(sessionID) - if normalized == "" { +func openAIWSPayloadStringFromRaw(payload []byte, key string) string { + if len(payload) == 0 || strings.TrimSpace(key) == "" { return "" } - sum := sha256.Sum256([]byte(normalized)) - return hex.EncodeToString(sum[:]) + return strings.TrimSpace(gjson.GetBytes(payload, key).String()) +} + +func openAIWSPayloadBoolFromRaw(payload []byte, key string, defaultValue bool) bool { + if len(payload) == 0 || strings.TrimSpace(key) == "" { + return defaultValue + } + value := gjson.GetBytes(payload, key) + if !value.Exists() { + return defaultValue + } + if value.Type != gjson.True && value.Type != gjson.False { + return defaultValue + } + return value.Bool() +} + +func openAIWSSessionHashesFromID(sessionID string) (string, string) { + return deriveOpenAISessionHashes(sessionID) } func extractOpenAIWSImageURL(value any) string { @@ -1195,6 +1209,23 @@ func (s *OpenAIGatewayService) isOpenAIWSStoreDisabledInRequest(reqBody map[stri return !storeEnabled } +func (s *OpenAIGatewayService) isOpenAIWSStoreDisabledInRequestRaw(reqBody []byte, account *Account) bool { + if account != nil && account.Type == AccountTypeOAuth && !s.isOpenAIWSStoreRecoveryAllowed(account) { + return true + } + if len(reqBody) == 0 { + return false + } + storeValue := gjson.GetBytes(reqBody, "store") + if !storeValue.Exists() { + return false + } + if storeValue.Type != gjson.True && storeValue.Type != gjson.False { + return false + } + return !storeValue.Bool() +} + func (s *OpenAIGatewayService) openAIWSStoreDisabledConnMode() string { if s == nil || s.cfg == nil { return openAIWSStoreDisabledConnModeStrict @@ -1324,7 +1355,9 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( groupID := getOpenAIGroupIDFromContext(c) sessionHash := s.GenerateSessionHash(c, nil) if sessionHash == "" { - sessionHash = openAIWSSessionHashFromID(promptCacheKey) + var legacySessionHash string + sessionHash, legacySessionHash = 
openAIWSSessionHashesFromID(promptCacheKey) + attachOpenAILegacySessionHashToGin(c, legacySessionHash) } if turnState == "" && stateStore != nil && sessionHash != "" { if savedTurnState, ok := stateStore.GetSessionTurnState(groupID, sessionHash); ok { @@ -1536,8 +1569,8 @@ func (s *OpenAIGatewayService) forwardOpenAIWSV2( var flusher http.Flusher if reqStream { - if s.cfg != nil { - responseheaders.WriteFilteredHeaders(c.Writer.Header(), http.Header{}, s.cfg.Security.ResponseHeaders) + if s.responseHeaderFilter != nil { + responseheaders.WriteFilteredHeaders(c.Writer.Header(), http.Header{}, s.responseHeaderFilter) } c.Header("Content-Type", "text/event-stream") c.Header("Cache-Control", "no-cache") @@ -1918,12 +1951,38 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( debugEnabled := isOpenAIWSModeDebugEnabled() type openAIWSClientPayload struct { - payload map[string]any + payloadRaw []byte + rawForHash []byte promptCacheKey string previousResponseID string originalModel string payloadBytes int - trimmedRaw []byte + } + + applyPayloadMutation := func(current []byte, path string, value any) ([]byte, error) { + next, err := sjson.SetBytes(current, path, value) + if err == nil { + return next, nil + } + + // 仅在确实需要修改 payload 且 sjson 失败时,退回 map 路径确保兼容性。 + payload := make(map[string]any) + if unmarshalErr := json.Unmarshal(current, &payload); unmarshalErr != nil { + return nil, err + } + switch path { + case "type", "model": + payload[path] = value + case "client_metadata." 
+ openAIWSTurnMetadataHeader: + setOpenAIWSTurnMetadata(payload, fmt.Sprintf("%v", value)) + default: + return nil, err + } + rebuilt, marshalErr := json.Marshal(payload) + if marshalErr != nil { + return nil, marshalErr + } + return rebuilt, nil } parseClientPayload := func(raw []byte) (openAIWSClientPayload, error) { @@ -1931,17 +1990,21 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( if len(trimmed) == 0 { return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "empty websocket request payload", nil) } - - payload := make(map[string]any) - if err := json.Unmarshal(trimmed, &payload); err != nil { - return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", err) + if !gjson.ValidBytes(trimmed) { + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", errors.New("invalid json")) } - eventType := openAIWSPayloadString(payload, "type") + values := gjson.GetManyBytes(trimmed, "type", "model", "prompt_cache_key", "previous_response_id") + eventType := strings.TrimSpace(values[0].String()) + normalized := trimmed switch eventType { case "": eventType = "response.create" - payload["type"] = eventType + next, setErr := applyPayloadMutation(normalized, "type", eventType) + if setErr != nil { + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", setErr) + } + normalized = next case "response.create": case "response.append": return openAIWSClientPayload{}, NewOpenAIWSClientCloseError( @@ -1957,7 +2020,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( ) } - originalModel := openAIWSPayloadString(payload, "model") + originalModel := strings.TrimSpace(values[1].String()) if originalModel == "" { return openAIWSClientPayload{}, NewOpenAIWSClientCloseError( coderws.StatusPolicyViolation, 
@@ -1965,26 +2028,34 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( nil, ) } - promptCacheKey := openAIWSPayloadString(payload, "prompt_cache_key") - previousResponseID := openAIWSPayloadString(payload, "previous_response_id") + promptCacheKey := strings.TrimSpace(values[2].String()) + previousResponseID := strings.TrimSpace(values[3].String()) if turnMetadata := strings.TrimSpace(c.GetHeader(openAIWSTurnMetadataHeader)); turnMetadata != "" { - setOpenAIWSTurnMetadata(payload, turnMetadata) + next, setErr := applyPayloadMutation(normalized, "client_metadata."+openAIWSTurnMetadataHeader, turnMetadata) + if setErr != nil { + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", setErr) + } + normalized = next } mappedModel := account.GetMappedModel(originalModel) if normalizedModel := normalizeCodexModel(mappedModel); normalizedModel != "" { mappedModel = normalizedModel } if mappedModel != originalModel { - payload["model"] = mappedModel + next, setErr := applyPayloadMutation(normalized, "model", mappedModel) + if setErr != nil { + return openAIWSClientPayload{}, NewOpenAIWSClientCloseError(coderws.StatusPolicyViolation, "invalid websocket request payload", setErr) + } + normalized = next } return openAIWSClientPayload{ - payload: payload, + payloadRaw: normalized, + rawForHash: trimmed, promptCacheKey: promptCacheKey, previousResponseID: previousResponseID, originalModel: originalModel, - payloadBytes: len(trimmed), - trimmedRaw: trimmed, + payloadBytes: len(normalized), }, nil } @@ -1996,7 +2067,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( turnState := strings.TrimSpace(c.GetHeader(openAIWSTurnStateHeader)) stateStore := s.getOpenAIWSStateStore() groupID := getOpenAIGroupIDFromContext(c) - sessionHash := s.GenerateSessionHash(c, firstPayload.trimmedRaw) + sessionHash := s.GenerateSessionHash(c, firstPayload.rawForHash) if turnState == "" && 
stateStore != nil && sessionHash != "" { if savedTurnState, ok := stateStore.GetSessionTurnState(groupID, sessionHash); ok { turnState = savedTurnState @@ -2010,7 +2081,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } } - storeDisabled := s.isOpenAIWSStoreDisabledInRequest(firstPayload.payload, account) + storeDisabled := s.isOpenAIWSStoreDisabledInRequestRaw(firstPayload.payloadRaw, account) if stateStore != nil && storeDisabled && firstPayload.previousResponseID == "" && sessionHash != "" { if connID, ok := stateStore.GetSessionConn(groupID, sessionHash); ok { preferredConnID = connID @@ -2166,13 +2237,13 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( return payload, nil } - sendAndRelay := func(turn int, lease *openAIWSConnLease, payload map[string]any, payloadBytes int, originalModel string) (*OpenAIForwardResult, error) { + sendAndRelay := func(turn int, lease *openAIWSConnLease, payload []byte, payloadBytes int, originalModel string) (*OpenAIForwardResult, error) { if lease == nil { return nil, errors.New("upstream websocket lease is nil") } turnStart := time.Now() wroteDownstream := false - if err := lease.WriteJSONWithContextTimeout(ctx, payload, s.openAIWSWriteTimeout()); err != nil { + if err := lease.WriteJSONWithContextTimeout(ctx, json.RawMessage(payload), s.openAIWSWriteTimeout()); err != nil { return nil, wrapOpenAIWSIngressTurnError( "write_upstream", fmt.Errorf("write upstream websocket request: %w", err), @@ -2192,10 +2263,10 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( responseID := "" usage := OpenAIUsage{} var firstTokenMs *int - reqStream := true - turnPreviousResponseID := openAIWSPayloadString(payload, "previous_response_id") - turnPromptCacheKey := openAIWSPayloadString(payload, "prompt_cache_key") - turnStoreDisabled := s.isOpenAIWSStoreDisabledInRequest(payload, account) + reqStream := openAIWSPayloadBoolFromRaw(payload, "stream", true) + turnPreviousResponseID := 
openAIWSPayloadStringFromRaw(payload, "previous_response_id") + turnPromptCacheKey := openAIWSPayloadStringFromRaw(payload, "prompt_cache_key") + turnStoreDisabled := s.isOpenAIWSStoreDisabledInRequestRaw(payload, account) eventCount := 0 tokenEventCount := 0 terminalEventCount := 0 @@ -2215,9 +2286,6 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( mappedModelBytes = []byte(mappedModel) } } - if streamValue, ok := payload["stream"].(bool); ok { - reqStream = streamValue - } for { upstreamMessage, readErr := lease.ReadMessageWithContextTimeout(ctx, s.openAIWSReadTimeout()) if readErr != nil { @@ -2338,7 +2406,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( RequestID: responseID, Usage: usage, Model: originalModel, - ReasoningEffort: extractOpenAIReasoningEffort(payload, originalModel), + ReasoningEffort: extractOpenAIReasoningEffortFromBody(payload, originalModel), Stream: reqStream, OpenAIWSMode: true, Duration: time.Since(turnStart), @@ -2348,7 +2416,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } } - currentPayload := firstPayload.payload + currentPayload := firstPayload.payloadRaw currentOriginalModel := firstPayload.originalModel currentPayloadBytes := firstPayload.payloadBytes var sessionLease *openAIWSConnLease @@ -2418,7 +2486,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } } connID := sessionConnID - currentPreviousResponseID := openAIWSPayloadString(currentPayload, "previous_response_id") + currentPreviousResponseID := openAIWSPayloadStringFromRaw(currentPayload, "previous_response_id") if currentPreviousResponseID != "" { expectedPrev := strings.TrimSpace(lastTurnResponseID) chainedFromLast := expectedPrev != "" && currentPreviousResponseID == expectedPrev @@ -2435,7 +2503,7 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( openAIWSHeaderValueForLog(baseAcquireReq.Headers, "conversation_id"), turnState != "", len(turnState), - 
openAIWSPayloadString(currentPayload, "prompt_cache_key") != "", + openAIWSPayloadStringFromRaw(currentPayload, "prompt_cache_key") != "", storeDisabled, ) } @@ -2551,10 +2619,10 @@ func (s *OpenAIGatewayService) ProxyResponsesWebSocketFromClient( } } } - currentPayload = nextPayload.payload + currentPayload = nextPayload.payloadRaw currentOriginalModel = nextPayload.originalModel currentPayloadBytes = nextPayload.payloadBytes - storeDisabled = s.isOpenAIWSStoreDisabledInRequest(currentPayload, account) + storeDisabled = s.isOpenAIWSStoreDisabledInRequestRaw(currentPayload, account) turn++ } } diff --git a/backend/internal/service/openai_ws_forwarder_success_test.go b/backend/internal/service/openai_ws_forwarder_success_test.go index 516801588..592801f66 100644 --- a/backend/internal/service/openai_ws_forwarder_success_test.go +++ b/backend/internal/service/openai_ws_forwarder_success_test.go @@ -1229,9 +1229,22 @@ func (c *openAIWSCaptureConn) WriteJSON(ctx context.Context, value any) error { if c.closed { return errOpenAIWSConnClosed } - if payload, ok := value.(map[string]any); ok { + switch payload := value.(type) { + case map[string]any: c.lastWrite = cloneMapStringAny(payload) c.writes = append(c.writes, cloneMapStringAny(payload)) + case json.RawMessage: + var parsed map[string]any + if err := json.Unmarshal(payload, &parsed); err == nil { + c.lastWrite = cloneMapStringAny(parsed) + c.writes = append(c.writes, cloneMapStringAny(parsed)) + } + case []byte: + var parsed map[string]any + if err := json.Unmarshal(payload, &parsed); err == nil { + c.lastWrite = cloneMapStringAny(parsed) + c.writes = append(c.writes, cloneMapStringAny(parsed)) + } } return nil } diff --git a/backend/internal/service/openai_ws_pool.go b/backend/internal/service/openai_ws_pool.go index 8fba52481..94fed61ef 100644 --- a/backend/internal/service/openai_ws_pool.go +++ b/backend/internal/service/openai_ws_pool.go @@ -14,6 +14,7 @@ import ( "time" 
"github.com/Wei-Shaw/sub2api/internal/config" + "golang.org/x/sync/errgroup" ) const ( @@ -531,6 +532,7 @@ type openAIWSConnPool struct { metrics openAIWSPoolMetrics workerStopCh chan struct{} + workerWg sync.WaitGroup closeOnce sync.Once } @@ -584,7 +586,10 @@ func (p *openAIWSConnPool) Close() { return } p.closeOnce.Do(func() { - close(p.workerStopCh) + if p.workerStopCh != nil { + close(p.workerStopCh) + } + p.workerWg.Wait() // 遍历所有账户池,关闭全部空闲连接。 p.accounts.Range(func(key, value any) bool { ap, ok := value.(*openAIWSAccountPool) @@ -607,8 +612,15 @@ func (p *openAIWSConnPool) startBackgroundWorkers() { if p == nil || p.workerStopCh == nil { return } - go p.runBackgroundPingWorker() - go p.runBackgroundCleanupWorker() + p.workerWg.Add(2) + go func() { + defer p.workerWg.Done() + p.runBackgroundPingWorker() + }() + go func() { + defer p.workerWg.Done() + p.runBackgroundCleanupWorker() + }() } type openAIWSIdlePingCandidate struct { @@ -637,14 +649,21 @@ func (p *openAIWSConnPool) runBackgroundPingSweep() { return } candidates := p.snapshotIdleConnsForPing() + var g errgroup.Group + g.SetLimit(10) for _, item := range candidates { + item := item if item.conn == nil || item.conn.isLeased() || item.conn.waiters.Load() > 0 { continue } - if err := item.conn.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { - p.evictConn(item.accountID, item.conn.id) - } + g.Go(func() error { + if err := item.conn.pingWithTimeout(openAIWSConnHealthCheckTO); err != nil { + p.evictConn(item.accountID, item.conn.id) + } + return nil + }) } + _ = g.Wait() } func (p *openAIWSConnPool) snapshotIdleConnsForPing() []openAIWSIdlePingCandidate { diff --git a/backend/internal/service/ops_retry.go b/backend/internal/service/ops_retry.go index 23a524ad2..feac7fa0f 100644 --- a/backend/internal/service/ops_retry.go +++ b/backend/internal/service/ops_retry.go @@ -13,7 +13,6 @@ import ( "time" "github.com/Wei-Shaw/sub2api/internal/domain" - "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" 
infraerrors "github.com/Wei-Shaw/sub2api/internal/pkg/errors" "github.com/gin-gonic/gin" "github.com/lib/pq" @@ -480,7 +479,7 @@ func (s *OpsService) executeClientRetry(ctx context.Context, reqType opsRetryReq attemptCtx := ctx if switches > 0 { - attemptCtx = context.WithValue(attemptCtx, ctxkey.AccountSwitchCount, switches) + attemptCtx = WithAccountSwitchCount(attemptCtx, switches, false) } exec := func() *opsRetryExecution { defer selection.ReleaseFunc() diff --git a/backend/internal/service/request_metadata.go b/backend/internal/service/request_metadata.go new file mode 100644 index 000000000..5c81bbf12 --- /dev/null +++ b/backend/internal/service/request_metadata.go @@ -0,0 +1,216 @@ +package service + +import ( + "context" + "sync/atomic" + + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" +) + +type requestMetadataContextKey struct{} + +var requestMetadataKey = requestMetadataContextKey{} + +type RequestMetadata struct { + IsMaxTokensOneHaikuRequest *bool + ThinkingEnabled *bool + PrefetchedStickyAccountID *int64 + PrefetchedStickyGroupID *int64 + SingleAccountRetry *bool + AccountSwitchCount *int +} + +var ( + requestMetadataFallbackIsMaxTokensOneHaikuTotal atomic.Int64 + requestMetadataFallbackThinkingEnabledTotal atomic.Int64 + requestMetadataFallbackPrefetchedStickyAccount atomic.Int64 + requestMetadataFallbackPrefetchedStickyGroup atomic.Int64 + requestMetadataFallbackSingleAccountRetryTotal atomic.Int64 + requestMetadataFallbackAccountSwitchCountTotal atomic.Int64 +) + +func RequestMetadataFallbackStats() (isMaxTokensOneHaiku, thinkingEnabled, prefetchedStickyAccount, prefetchedStickyGroup, singleAccountRetry, accountSwitchCount int64) { + return requestMetadataFallbackIsMaxTokensOneHaikuTotal.Load(), + requestMetadataFallbackThinkingEnabledTotal.Load(), + requestMetadataFallbackPrefetchedStickyAccount.Load(), + requestMetadataFallbackPrefetchedStickyGroup.Load(), + requestMetadataFallbackSingleAccountRetryTotal.Load(), + 
requestMetadataFallbackAccountSwitchCountTotal.Load() +} + +func metadataFromContext(ctx context.Context) *RequestMetadata { + if ctx == nil { + return nil + } + md, _ := ctx.Value(requestMetadataKey).(*RequestMetadata) + return md +} + +func updateRequestMetadata( + ctx context.Context, + bridgeOldKeys bool, + update func(md *RequestMetadata), + legacyBridge func(ctx context.Context) context.Context, +) context.Context { + if ctx == nil { + return nil + } + current := metadataFromContext(ctx) + next := &RequestMetadata{} + if current != nil { + *next = *current + } + update(next) + ctx = context.WithValue(ctx, requestMetadataKey, next) + if bridgeOldKeys && legacyBridge != nil { + ctx = legacyBridge(ctx) + } + return ctx +} + +func WithIsMaxTokensOneHaikuRequest(ctx context.Context, value bool, bridgeOldKeys bool) context.Context { + return updateRequestMetadata(ctx, bridgeOldKeys, func(md *RequestMetadata) { + v := value + md.IsMaxTokensOneHaikuRequest = &v + }, func(base context.Context) context.Context { + return context.WithValue(base, ctxkey.IsMaxTokensOneHaikuRequest, value) + }) +} + +func WithThinkingEnabled(ctx context.Context, value bool, bridgeOldKeys bool) context.Context { + return updateRequestMetadata(ctx, bridgeOldKeys, func(md *RequestMetadata) { + v := value + md.ThinkingEnabled = &v + }, func(base context.Context) context.Context { + return context.WithValue(base, ctxkey.ThinkingEnabled, value) + }) +} + +func WithPrefetchedStickySession(ctx context.Context, accountID, groupID int64, bridgeOldKeys bool) context.Context { + return updateRequestMetadata(ctx, bridgeOldKeys, func(md *RequestMetadata) { + account := accountID + group := groupID + md.PrefetchedStickyAccountID = &account + md.PrefetchedStickyGroupID = &group + }, func(base context.Context) context.Context { + bridged := context.WithValue(base, ctxkey.PrefetchedStickyAccountID, accountID) + return context.WithValue(bridged, ctxkey.PrefetchedStickyGroupID, groupID) + }) +} + +func 
WithSingleAccountRetry(ctx context.Context, value bool, bridgeOldKeys bool) context.Context { + return updateRequestMetadata(ctx, bridgeOldKeys, func(md *RequestMetadata) { + v := value + md.SingleAccountRetry = &v + }, func(base context.Context) context.Context { + return context.WithValue(base, ctxkey.SingleAccountRetry, value) + }) +} + +func WithAccountSwitchCount(ctx context.Context, value int, bridgeOldKeys bool) context.Context { + return updateRequestMetadata(ctx, bridgeOldKeys, func(md *RequestMetadata) { + v := value + md.AccountSwitchCount = &v + }, func(base context.Context) context.Context { + return context.WithValue(base, ctxkey.AccountSwitchCount, value) + }) +} + +func IsMaxTokensOneHaikuRequestFromContext(ctx context.Context) (bool, bool) { + if md := metadataFromContext(ctx); md != nil && md.IsMaxTokensOneHaikuRequest != nil { + return *md.IsMaxTokensOneHaikuRequest, true + } + if ctx == nil { + return false, false + } + if value, ok := ctx.Value(ctxkey.IsMaxTokensOneHaikuRequest).(bool); ok { + requestMetadataFallbackIsMaxTokensOneHaikuTotal.Add(1) + return value, true + } + return false, false +} + +func ThinkingEnabledFromContext(ctx context.Context) (bool, bool) { + if md := metadataFromContext(ctx); md != nil && md.ThinkingEnabled != nil { + return *md.ThinkingEnabled, true + } + if ctx == nil { + return false, false + } + if value, ok := ctx.Value(ctxkey.ThinkingEnabled).(bool); ok { + requestMetadataFallbackThinkingEnabledTotal.Add(1) + return value, true + } + return false, false +} + +func PrefetchedStickyGroupIDFromContext(ctx context.Context) (int64, bool) { + if md := metadataFromContext(ctx); md != nil && md.PrefetchedStickyGroupID != nil { + return *md.PrefetchedStickyGroupID, true + } + if ctx == nil { + return 0, false + } + v := ctx.Value(ctxkey.PrefetchedStickyGroupID) + switch t := v.(type) { + case int64: + requestMetadataFallbackPrefetchedStickyGroup.Add(1) + return t, true + case int: + 
requestMetadataFallbackPrefetchedStickyGroup.Add(1) + return int64(t), true + } + return 0, false +} + +func PrefetchedStickyAccountIDFromContext(ctx context.Context) (int64, bool) { + if md := metadataFromContext(ctx); md != nil && md.PrefetchedStickyAccountID != nil { + return *md.PrefetchedStickyAccountID, true + } + if ctx == nil { + return 0, false + } + v := ctx.Value(ctxkey.PrefetchedStickyAccountID) + switch t := v.(type) { + case int64: + requestMetadataFallbackPrefetchedStickyAccount.Add(1) + return t, true + case int: + requestMetadataFallbackPrefetchedStickyAccount.Add(1) + return int64(t), true + } + return 0, false +} + +func SingleAccountRetryFromContext(ctx context.Context) (bool, bool) { + if md := metadataFromContext(ctx); md != nil && md.SingleAccountRetry != nil { + return *md.SingleAccountRetry, true + } + if ctx == nil { + return false, false + } + if value, ok := ctx.Value(ctxkey.SingleAccountRetry).(bool); ok { + requestMetadataFallbackSingleAccountRetryTotal.Add(1) + return value, true + } + return false, false +} + +func AccountSwitchCountFromContext(ctx context.Context) (int, bool) { + if md := metadataFromContext(ctx); md != nil && md.AccountSwitchCount != nil { + return *md.AccountSwitchCount, true + } + if ctx == nil { + return 0, false + } + v := ctx.Value(ctxkey.AccountSwitchCount) + switch t := v.(type) { + case int: + requestMetadataFallbackAccountSwitchCountTotal.Add(1) + return t, true + case int64: + requestMetadataFallbackAccountSwitchCountTotal.Add(1) + return int(t), true + } + return 0, false +} diff --git a/backend/internal/service/request_metadata_test.go b/backend/internal/service/request_metadata_test.go new file mode 100644 index 000000000..7d192699b --- /dev/null +++ b/backend/internal/service/request_metadata_test.go @@ -0,0 +1,119 @@ +package service + +import ( + "context" + "testing" + + "github.com/Wei-Shaw/sub2api/internal/pkg/ctxkey" + "github.com/stretchr/testify/require" +) + +func 
TestRequestMetadataWriteAndRead_NoBridge(t *testing.T) { + ctx := context.Background() + ctx = WithIsMaxTokensOneHaikuRequest(ctx, true, false) + ctx = WithThinkingEnabled(ctx, true, false) + ctx = WithPrefetchedStickySession(ctx, 123, 456, false) + ctx = WithSingleAccountRetry(ctx, true, false) + ctx = WithAccountSwitchCount(ctx, 2, false) + + isHaiku, ok := IsMaxTokensOneHaikuRequestFromContext(ctx) + require.True(t, ok) + require.True(t, isHaiku) + + thinking, ok := ThinkingEnabledFromContext(ctx) + require.True(t, ok) + require.True(t, thinking) + + accountID, ok := PrefetchedStickyAccountIDFromContext(ctx) + require.True(t, ok) + require.Equal(t, int64(123), accountID) + + groupID, ok := PrefetchedStickyGroupIDFromContext(ctx) + require.True(t, ok) + require.Equal(t, int64(456), groupID) + + singleRetry, ok := SingleAccountRetryFromContext(ctx) + require.True(t, ok) + require.True(t, singleRetry) + + switchCount, ok := AccountSwitchCountFromContext(ctx) + require.True(t, ok) + require.Equal(t, 2, switchCount) + + require.Nil(t, ctx.Value(ctxkey.IsMaxTokensOneHaikuRequest)) + require.Nil(t, ctx.Value(ctxkey.ThinkingEnabled)) + require.Nil(t, ctx.Value(ctxkey.PrefetchedStickyAccountID)) + require.Nil(t, ctx.Value(ctxkey.PrefetchedStickyGroupID)) + require.Nil(t, ctx.Value(ctxkey.SingleAccountRetry)) + require.Nil(t, ctx.Value(ctxkey.AccountSwitchCount)) +} + +func TestRequestMetadataWrite_BridgeLegacyKeys(t *testing.T) { + ctx := context.Background() + ctx = WithIsMaxTokensOneHaikuRequest(ctx, true, true) + ctx = WithThinkingEnabled(ctx, true, true) + ctx = WithPrefetchedStickySession(ctx, 123, 456, true) + ctx = WithSingleAccountRetry(ctx, true, true) + ctx = WithAccountSwitchCount(ctx, 2, true) + + require.Equal(t, true, ctx.Value(ctxkey.IsMaxTokensOneHaikuRequest)) + require.Equal(t, true, ctx.Value(ctxkey.ThinkingEnabled)) + require.Equal(t, int64(123), ctx.Value(ctxkey.PrefetchedStickyAccountID)) + require.Equal(t, int64(456), 
ctx.Value(ctxkey.PrefetchedStickyGroupID)) + require.Equal(t, true, ctx.Value(ctxkey.SingleAccountRetry)) + require.Equal(t, 2, ctx.Value(ctxkey.AccountSwitchCount)) +} + +func TestRequestMetadataRead_LegacyFallbackAndStats(t *testing.T) { + beforeHaiku, beforeThinking, beforeAccount, beforeGroup, beforeSingleRetry, beforeSwitchCount := RequestMetadataFallbackStats() + + ctx := context.Background() + ctx = context.WithValue(ctx, ctxkey.IsMaxTokensOneHaikuRequest, true) + ctx = context.WithValue(ctx, ctxkey.ThinkingEnabled, true) + ctx = context.WithValue(ctx, ctxkey.PrefetchedStickyAccountID, int64(321)) + ctx = context.WithValue(ctx, ctxkey.PrefetchedStickyGroupID, int64(654)) + ctx = context.WithValue(ctx, ctxkey.SingleAccountRetry, true) + ctx = context.WithValue(ctx, ctxkey.AccountSwitchCount, int64(3)) + + isHaiku, ok := IsMaxTokensOneHaikuRequestFromContext(ctx) + require.True(t, ok) + require.True(t, isHaiku) + + thinking, ok := ThinkingEnabledFromContext(ctx) + require.True(t, ok) + require.True(t, thinking) + + accountID, ok := PrefetchedStickyAccountIDFromContext(ctx) + require.True(t, ok) + require.Equal(t, int64(321), accountID) + + groupID, ok := PrefetchedStickyGroupIDFromContext(ctx) + require.True(t, ok) + require.Equal(t, int64(654), groupID) + + singleRetry, ok := SingleAccountRetryFromContext(ctx) + require.True(t, ok) + require.True(t, singleRetry) + + switchCount, ok := AccountSwitchCountFromContext(ctx) + require.True(t, ok) + require.Equal(t, 3, switchCount) + + afterHaiku, afterThinking, afterAccount, afterGroup, afterSingleRetry, afterSwitchCount := RequestMetadataFallbackStats() + require.Equal(t, beforeHaiku+1, afterHaiku) + require.Equal(t, beforeThinking+1, afterThinking) + require.Equal(t, beforeAccount+1, afterAccount) + require.Equal(t, beforeGroup+1, afterGroup) + require.Equal(t, beforeSingleRetry+1, afterSingleRetry) + require.Equal(t, beforeSwitchCount+1, afterSwitchCount) +} + +func 
TestRequestMetadataRead_PreferMetadataOverLegacy(t *testing.T) { + ctx := context.WithValue(context.Background(), ctxkey.ThinkingEnabled, false) + ctx = WithThinkingEnabled(ctx, true, false) + + thinking, ok := ThinkingEnabledFromContext(ctx) + require.True(t, ok) + require.True(t, thinking) + require.Equal(t, false, ctx.Value(ctxkey.ThinkingEnabled)) +} diff --git a/backend/internal/service/response_header_filter.go b/backend/internal/service/response_header_filter.go new file mode 100644 index 000000000..81012b012 --- /dev/null +++ b/backend/internal/service/response_header_filter.go @@ -0,0 +1,13 @@ +package service + +import ( + "github.com/Wei-Shaw/sub2api/internal/config" + "github.com/Wei-Shaw/sub2api/internal/util/responseheaders" +) + +func compileResponseHeaderFilter(cfg *config.Config) *responseheaders.CompiledHeaderFilter { + if cfg == nil { + return nil + } + return responseheaders.CompileHeaderFilter(cfg.Security.ResponseHeaders) +} diff --git a/backend/internal/util/logredact/redact.go b/backend/internal/util/logredact/redact.go index 492d875ce..9249b761c 100644 --- a/backend/internal/util/logredact/redact.go +++ b/backend/internal/util/logredact/redact.go @@ -3,7 +3,9 @@ package logredact import ( "encoding/json" "regexp" + "sort" "strings" + "sync" ) // maxRedactDepth 限制递归深度以防止栈溢出 @@ -31,9 +33,18 @@ var defaultSensitiveKeyList = []string{ "password", } +type textRedactPatterns struct { + reJSONLike *regexp.Regexp + reQueryLike *regexp.Regexp + rePlain *regexp.Regexp +} + var ( reGOCSPX = regexp.MustCompile(`GOCSPX-[0-9A-Za-z_-]{24,}`) reAIza = regexp.MustCompile(`AIza[0-9A-Za-z_-]{35}`) + + defaultTextRedactPatterns = compileTextRedactPatterns(nil) + extraTextPatternCache sync.Map // map[string]*textRedactPatterns ) func RedactMap(input map[string]any, extraKeys ...string) map[string]any { @@ -83,23 +94,71 @@ func RedactText(input string, extraKeys ...string) string { return RedactJSON(raw, extraKeys...) 
} - keyAlt := buildKeyAlternation(extraKeys) - // JSON-like: "access_token":"..." - reJSONLike := regexp.MustCompile(`(?i)("(?:` + keyAlt + `)"\s*:\s*")([^"]*)(")`) - // Query-like: access_token=... - reQueryLike := regexp.MustCompile(`(?i)\b((?:` + keyAlt + `))=([^&\s]+)`) - // Plain: access_token: ... / access_token = ... - rePlain := regexp.MustCompile(`(?i)\b((?:` + keyAlt + `))\b(\s*[:=]\s*)([^,\s]+)`) + patterns := getTextRedactPatterns(extraKeys) out := input out = reGOCSPX.ReplaceAllString(out, "GOCSPX-***") out = reAIza.ReplaceAllString(out, "AIza***") - out = reJSONLike.ReplaceAllString(out, `$1***$3`) - out = reQueryLike.ReplaceAllString(out, `$1=***`) - out = rePlain.ReplaceAllString(out, `$1$2***`) + out = patterns.reJSONLike.ReplaceAllString(out, `$1***$3`) + out = patterns.reQueryLike.ReplaceAllString(out, `$1=***`) + out = patterns.rePlain.ReplaceAllString(out, `$1$2***`) return out } +func compileTextRedactPatterns(extraKeys []string) *textRedactPatterns { + keyAlt := buildKeyAlternation(extraKeys) + return &textRedactPatterns{ + // JSON-like: "access_token":"..." + reJSONLike: regexp.MustCompile(`(?i)("(?:` + keyAlt + `)"\s*:\s*")([^"]*)(")`), + // Query-like: access_token=... + reQueryLike: regexp.MustCompile(`(?i)\b((?:` + keyAlt + `))=([^&\s]+)`), + // Plain: access_token: ... / access_token = ... 
+ rePlain: regexp.MustCompile(`(?i)\b((?:` + keyAlt + `))\b(\s*[:=]\s*)([^,\s]+)`), + } +} + +func getTextRedactPatterns(extraKeys []string) *textRedactPatterns { + normalizedExtraKeys := normalizeAndSortExtraKeys(extraKeys) + if len(normalizedExtraKeys) == 0 { + return defaultTextRedactPatterns + } + + cacheKey := strings.Join(normalizedExtraKeys, ",") + if cached, ok := extraTextPatternCache.Load(cacheKey); ok { + if patterns, ok := cached.(*textRedactPatterns); ok { + return patterns + } + } + + compiled := compileTextRedactPatterns(normalizedExtraKeys) + actual, _ := extraTextPatternCache.LoadOrStore(cacheKey, compiled) + if patterns, ok := actual.(*textRedactPatterns); ok { + return patterns + } + return compiled +} + +func normalizeAndSortExtraKeys(extraKeys []string) []string { + if len(extraKeys) == 0 { + return nil + } + seen := make(map[string]struct{}, len(extraKeys)) + keys := make([]string, 0, len(extraKeys)) + for _, key := range extraKeys { + normalized := normalizeKey(key) + if normalized == "" { + continue + } + if _, ok := seen[normalized]; ok { + continue + } + seen[normalized] = struct{}{} + keys = append(keys, normalized) + } + sort.Strings(keys) + return keys +} + func buildKeyAlternation(extraKeys []string) string { seen := make(map[string]struct{}, len(defaultSensitiveKeyList)+len(extraKeys)) keys := make([]string, 0, len(defaultSensitiveKeyList)+len(extraKeys)) diff --git a/backend/internal/util/logredact/redact_test.go b/backend/internal/util/logredact/redact_test.go index 64a7b3cf2..266db69db 100644 --- a/backend/internal/util/logredact/redact_test.go +++ b/backend/internal/util/logredact/redact_test.go @@ -37,3 +37,48 @@ func TestRedactText_GOCSPX(t *testing.T) { t.Fatalf("expected key redacted, got %q", out) } } + +func TestRedactText_ExtraKeyCacheUsesNormalizedSortedKey(t *testing.T) { + clearExtraTextPatternCache() + + out1 := RedactText("custom_secret=abc", "Custom_Secret", " custom_secret ") + out2 := RedactText("custom_secret=xyz", 
"custom_secret") + if !strings.Contains(out1, "custom_secret=***") { + t.Fatalf("expected custom key redacted in first call, got %q", out1) + } + if !strings.Contains(out2, "custom_secret=***") { + t.Fatalf("expected custom key redacted in second call, got %q", out2) + } + + if got := countExtraTextPatternCacheEntries(); got != 1 { + t.Fatalf("expected 1 cached pattern set, got %d", got) + } +} + +func TestRedactText_DefaultPathDoesNotUseExtraCache(t *testing.T) { + clearExtraTextPatternCache() + + out := RedactText("access_token=abc") + if !strings.Contains(out, "access_token=***") { + t.Fatalf("expected default key redacted, got %q", out) + } + if got := countExtraTextPatternCacheEntries(); got != 0 { + t.Fatalf("expected extra cache to remain empty, got %d", got) + } +} + +func clearExtraTextPatternCache() { + extraTextPatternCache.Range(func(key, value any) bool { + extraTextPatternCache.Delete(key) + return true + }) +} + +func countExtraTextPatternCacheEntries() int { + count := 0 + extraTextPatternCache.Range(func(key, value any) bool { + count++ + return true + }) + return count +} diff --git a/backend/internal/util/responseheaders/responseheaders.go b/backend/internal/util/responseheaders/responseheaders.go index 86c3f6246..7f7baca65 100644 --- a/backend/internal/util/responseheaders/responseheaders.go +++ b/backend/internal/util/responseheaders/responseheaders.go @@ -41,7 +41,14 @@ var hopByHopHeaders = map[string]struct{}{ "connection": {}, } -func FilterHeaders(src http.Header, cfg config.ResponseHeaderConfig) http.Header { +type CompiledHeaderFilter struct { + allowed map[string]struct{} + forceRemove map[string]struct{} +} + +var defaultCompiledHeaderFilter = CompileHeaderFilter(config.ResponseHeaderConfig{}) + +func CompileHeaderFilter(cfg config.ResponseHeaderConfig) *CompiledHeaderFilter { allowed := make(map[string]struct{}, len(defaultAllowed)+len(cfg.AdditionalAllowed)) for key := range defaultAllowed { allowed[key] = struct{}{} @@ -69,13 +76,24 
@@ func FilterHeaders(src http.Header, cfg config.ResponseHeaderConfig) http.Header } } + return &CompiledHeaderFilter{ + allowed: allowed, + forceRemove: forceRemove, + } +} + +func FilterHeaders(src http.Header, filter *CompiledHeaderFilter) http.Header { + if filter == nil { + filter = defaultCompiledHeaderFilter + } + filtered := make(http.Header, len(src)) for key, values := range src { lower := strings.ToLower(key) - if _, blocked := forceRemove[lower]; blocked { + if _, blocked := filter.forceRemove[lower]; blocked { continue } - if _, ok := allowed[lower]; !ok { + if _, ok := filter.allowed[lower]; !ok { continue } // 跳过 hop-by-hop 头部,这些由 HTTP 库自动处理 @@ -89,8 +107,8 @@ func FilterHeaders(src http.Header, cfg config.ResponseHeaderConfig) http.Header return filtered } -func WriteFilteredHeaders(dst http.Header, src http.Header, cfg config.ResponseHeaderConfig) { - filtered := FilterHeaders(src, cfg) +func WriteFilteredHeaders(dst http.Header, src http.Header, filter *CompiledHeaderFilter) { + filtered := FilterHeaders(src, filter) for key, values := range filtered { for _, value := range values { dst.Add(key, value) diff --git a/backend/internal/util/responseheaders/responseheaders_test.go b/backend/internal/util/responseheaders/responseheaders_test.go index f73432670..d817559e6 100644 --- a/backend/internal/util/responseheaders/responseheaders_test.go +++ b/backend/internal/util/responseheaders/responseheaders_test.go @@ -20,7 +20,7 @@ func TestFilterHeadersDisabledUsesDefaultAllowlist(t *testing.T) { ForceRemove: []string{"x-request-id"}, } - filtered := FilterHeaders(src, cfg) + filtered := FilterHeaders(src, CompileHeaderFilter(cfg)) if filtered.Get("Content-Type") != "application/json" { t.Fatalf("expected Content-Type passthrough, got %q", filtered.Get("Content-Type")) } @@ -51,7 +51,7 @@ func TestFilterHeadersEnabledUsesAllowlist(t *testing.T) { ForceRemove: []string{"x-remove"}, } - filtered := FilterHeaders(src, cfg) + filtered := FilterHeaders(src, 
CompileHeaderFilter(cfg)) if filtered.Get("Content-Type") != "application/json" { t.Fatalf("expected Content-Type allowed, got %q", filtered.Get("Content-Type")) } diff --git a/backend/migrations/062_add_scheduler_and_usage_composite_indexes_notx.sql b/backend/migrations/062_add_scheduler_and_usage_composite_indexes_notx.sql new file mode 100644 index 000000000..c6139338b --- /dev/null +++ b/backend/migrations/062_add_scheduler_and_usage_composite_indexes_notx.sql @@ -0,0 +1,15 @@ +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_accounts_schedulable_hot + ON accounts (platform, priority) + WHERE deleted_at IS NULL AND status = 'active' AND schedulable = true; + +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_accounts_active_schedulable + ON accounts (priority, status) + WHERE deleted_at IS NULL AND schedulable = true; + +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_user_subscriptions_user_status_expires_active + ON user_subscriptions (user_id, status, expires_at) + WHERE deleted_at IS NULL; + +CREATE INDEX CONCURRENTLY IF NOT EXISTS idx_usage_logs_group_created_at_not_null + ON usage_logs (group_id, created_at) + WHERE group_id IS NOT NULL; diff --git a/backend/migrations/README.md b/backend/migrations/README.md index 3fe328e6e..47f6fa358 100644 --- a/backend/migrations/README.md +++ b/backend/migrations/README.md @@ -12,6 +12,26 @@ Format: `NNN_description.sql` Example: `017_add_gemini_tier_id.sql` +### `_notx.sql` 命名与执行语义(并发索引专用) + +当迁移包含 `CREATE INDEX CONCURRENTLY` 或 `DROP INDEX CONCURRENTLY` 时,必须使用 `_notx.sql` 后缀,例如: + +- `062_add_accounts_priority_indexes_notx.sql` +- `063_drop_legacy_indexes_notx.sql` + +运行规则: + +1. `*.sql`(不带 `_notx`)按事务执行。 +2. `*_notx.sql` 按非事务执行,不会包裹在 `BEGIN/COMMIT` 中。 +3. 
`*_notx.sql` 仅允许并发索引语句,不允许混入事务控制语句或其他 DDL/DML。 + +幂等要求(必须): + +- 创建索引:`CREATE INDEX CONCURRENTLY IF NOT EXISTS ...` +- 删除索引:`DROP INDEX CONCURRENTLY IF EXISTS ...` + +这样可以保证灾备重放、重复执行时不会因对象已存在/不存在而失败。 + ## Migration File Structure ```sql From 8998ad96e210dd049ea347e9681e910089357085 Mon Sep 17 00:00:00 2001 From: yangjianbo Date: Fri, 27 Feb 2026 08:35:51 +0800 Subject: [PATCH 034/120] =?UTF-8?q?feat(frontend):=20=E4=BC=98=E5=8C=96?= =?UTF-8?q?=E8=B4=A6=E5=8F=B7=E7=AE=A1=E7=90=86=E9=A1=B5=E6=80=A7=E8=83=BD?= =?UTF-8?q?=E4=B8=8E=E7=BB=9F=E8=AE=A1=E5=B1=95=E7=A4=BA?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- frontend/src/api/admin/accounts.ts | 17 ++++ .../account/AccountTodayStatsCell.vue | 59 +++++------ frontend/src/views/admin/AccountsView.vue | 97 +++++++++++++++++-- 3 files changed, 127 insertions(+), 46 deletions(-) diff --git a/frontend/src/api/admin/accounts.ts b/frontend/src/api/admin/accounts.ts index 1b8ae9ad4..565716999 100644 --- a/frontend/src/api/admin/accounts.ts +++ b/frontend/src/api/admin/accounts.ts @@ -369,6 +369,22 @@ export async function getTodayStats(id: number): Promise { return data } +export interface BatchTodayStatsResponse { + stats: Record +} + +/** + * 批量获取多个账号的今日统计 + * @param accountIds - 账号 ID 列表 + * @returns 以账号 ID(字符串)为键的统计映射 + */ +export async function getBatchTodayStats(accountIds: number[]): Promise { + const { data } = await apiClient.post('/admin/accounts/today-stats/batch', { + account_ids: accountIds + }) + return data +} + /** * Set account schedulable status * @param id - Account ID @@ -556,6 +572,7 @@ export const accountsAPI = { clearError, getUsage, getTodayStats, + getBatchTodayStats, clearRateLimit, getTempUnschedulableStatus, resetTempUnschedulable, diff --git a/frontend/src/components/account/AccountTodayStatsCell.vue b/frontend/src/components/account/AccountTodayStatsCell.vue index a920f3144..a422d1f00 100644 --- 
a/frontend/src/components/account/AccountTodayStatsCell.vue +++ b/frontend/src/components/account/AccountTodayStatsCell.vue @@ -1,26 +1,26 @@ diff --git a/frontend/src/views/admin/AccountsView.vue b/frontend/src/views/admin/AccountsView.vue index 236c6f54c..23301616a 100644 --- a/frontend/src/views/admin/AccountsView.vue +++ b/frontend/src/views/admin/AccountsView.vue @@ -184,7 +184,11 @@ + + + \ No newline at end of file diff --git a/openspec/changes/sora-client-s3-storage/specs/sora-account-apikey/spec.md b/openspec/changes/sora-client-s3-storage/specs/sora-account-apikey/spec.md new file mode 100644 index 000000000..e322009b8 --- /dev/null +++ b/openspec/changes/sora-client-s3-storage/specs/sora-account-apikey/spec.md @@ -0,0 +1,82 @@ +## ADDED Requirements + +### Requirement: Sora 平台支持 API Key 账号类型 +系统 SHALL 为 Sora 平台新增 "API Key / 上游透传" 账号类型,取消现有 OAuth 硬编码限制。 + +#### Scenario: 前端创建 Sora API Key 账号 +- **WHEN** 管理员在账号创建对话框中选择 Sora 平台 +- **THEN** 系统 SHALL 显示两个账号类别选项卡:"OAuth 认证"和"API Key / 上游透传" +- **AND** 选择"API Key / 上游透传"时 SHALL 显示 `Base URL`(必填)和 `API Key`(必填)表单字段 +- **AND** 提交时 `form.type` SHALL 设置为 `'apikey'` + +#### Scenario: Base URL 字段校验 +- **WHEN** 管理员创建或编辑 `platform=sora, type=apikey` 账号 +- **THEN** `base_url` SHALL 为必填 +- **AND** `base_url` SHALL 以 `http://` 或 `https://` 开头 +- **AND** 不满足校验时 SHALL 拒绝保存并提示明确错误 + +#### Scenario: 取消 Sora OAuth 硬编码 +- **WHEN** 用户选择 Sora 平台 +- **THEN** 系统 SHALL 不再强制设置 `form.type = 'oauth'` +- **AND** SHALL 允许用户选择 OAuth 或 API Key 类型 + +### Requirement: Sora API Key 账号编辑 +系统 SHALL 支持编辑 Sora API Key 类型账号的 `base_url` 和 `api_key`。 + +#### Scenario: 编辑 Sora API Key 账号 +- **WHEN** 管理员编辑一个 `platform=sora, type=apikey` 的账号 +- **THEN** 编辑界面 SHALL 显示 `Base URL` 和 `API Key` 可编辑字段 +- **AND** 保存时 SHALL 更新 `credentials` 中的 `base_url` 和 `api_key` + +### Requirement: Sora API Key 账号连通性测试 +系统 SHALL 支持 Sora API Key 账号的连通性测试。 + +#### Scenario: 测试连通性成功 +- **WHEN** 管理员点击"测试连接" +- **AND** 上游 `base_url` 可达且 `api_key` 有效 +- **THEN** 系统 SHALL 
发送轻量级请求到上游验证连通性 +- **AND** 返回测试成功结果 + +#### Scenario: 测试连通性失败 +- **WHEN** 上游不可达或认证失败 +- **THEN** 系统 SHALL 返回明确的错误信息(如"连接超时"、"认证失败") + +### Requirement: Sora apikey 账号 HTTP 透传 +系统 SHALL 对 `type=apikey` 的 Sora 账号执行 HTTP 透传,而非 SDK 直连。 + +#### Scenario: apikey 账号走 HTTP 透传 +- **WHEN** `SoraGatewayService.Forward()` 检测到 `account.Type == "apikey"` 且 `account.GetBaseURL() != ""` +- **THEN** 系统 SHALL 调用 `forwardToUpstream()` 方法 +- **AND** SHALL 不使用 `SoraSDKClient` 直连 + +#### Scenario: HTTP 透传请求构造 +- **WHEN** 系统执行 `forwardToUpstream()` +- **THEN** 系统 SHALL 构造 HTTP POST 请求到规范化拼接的 `{base_url}/sora/v1/chat/completions` +- **AND** Header SHALL 包含 `Authorization: Bearer ` 和 `Content-Type: application/json` +- **AND** 请求体 SHALL 原样透传客户端请求体 + +#### Scenario: 流式响应透传 +- **WHEN** 上游返回流式 SSE 响应 +- **THEN** 系统 SHALL 逐字节透传 SSE 流到客户端 +- **AND** SHALL 不缓存完整响应 + +#### Scenario: 非流式响应透传 +- **WHEN** 上游返回非流式 JSON 响应 +- **THEN** 系统 SHALL 读取完整响应后原样返回客户端 + +#### Scenario: 上游错误触发失败转移 +- **WHEN** 上游返回 401/403/429/5xx 错误 +- **THEN** 系统 SHALL 复用现有的 `UpstreamFailoverError` 机制触发账号切换 + +### Requirement: sub2api 二级桥接 +系统 SHALL 通过 API Key 账号类型天然支持 sub2api 级联部署。 + +#### Scenario: 分站通过 API Key 连接总站 +- **WHEN** 分站创建 Sora API Key 账号,`base_url` 指向总站地址 +- **THEN** 分站的 Sora 请求 SHALL 通过 HTTP 透传到总站的 `/sora/v1/chat/completions` +- **AND** 总站 SHALL 使用自己的 OAuth 账号连接 OpenAI + +#### Scenario: 级联中的存储独立性 +- **WHEN** 分站收到总站返回的生成结果 +- **THEN** 分站 SHALL 根据自己的 S3 配置决定是否存储 +- **AND** 存储行为与总站无关(完全独立) diff --git a/openspec/changes/sora-client-s3-storage/specs/sora-client-ui/spec.md b/openspec/changes/sora-client-s3-storage/specs/sora-client-ui/spec.md new file mode 100644 index 000000000..7168afbd0 --- /dev/null +++ b/openspec/changes/sora-client-s3-storage/specs/sora-client-ui/spec.md @@ -0,0 +1,286 @@ +## ADDED Requirements + +### Requirement: Sora 客户端路由与菜单 +系统 SHALL 在前端新增 Sora 客户端页面,可通过侧边栏菜单访问。 + +#### Scenario: 路由注册 +- **WHEN** 前端路由初始化 +- **THEN** 系统 SHALL 注册 `/sora` 路由,加载 `SoraView.vue` 页面 +- **AND** 路由 SHALL 需要用户登录认证 + 
+#### Scenario: 侧边栏菜单项 +- **WHEN** 用户登录后查看侧边栏 +- **THEN** 侧边栏 SHALL 显示"Sora"菜单项 +- **AND** 点击后 SHALL 跳转到 `/sora` 页面 + +### Requirement: Sora 客户端导航栏 +系统 SHALL 在 Sora 客户端页面顶部显示导航栏,包含 Tab 切换和配额信息。 + +#### Scenario: 导航栏显示 +- **WHEN** 用户进入 Sora 客户端页面 +- **THEN** 顶部 SHALL 显示导航栏,包含 Logo "Sora"、"生成"/"作品库" Tab 切换 +- **AND** 右侧 SHALL 显示配额进度条(如 "2.1GB / 5GB")和用户头像 + +#### Scenario: Tab 切换 +- **WHEN** 用户点击"生成"或"作品库" Tab +- **THEN** 页面 SHALL 切换到对应视图,不刷新页面 + +### Requirement: 生成页面 - 底部创作栏 +系统 SHALL 在生成页底部固定显示创作栏,用于输入提示词和配置生成参数。 + +#### Scenario: 提示词输入 +- **WHEN** 用户在创作栏输入提示词 +- **THEN** 输入框 SHALL 支持多行文本,自动扩展高度 +- **AND** 支持 Ctrl/Cmd + Enter 快捷键触发生成 + +#### Scenario: 模型选择 +- **WHEN** 用户点击模型选择器 +- **THEN** 系统 SHALL 从 `GET /api/v1/sora/models` 获取可用模型列表 +- **AND** 下拉菜单 SHALL 按视频模型和图片模型分组显示 + +#### Scenario: 视频参数配置 +- **WHEN** 用户选择视频模型 +- **THEN** 创作栏 SHALL 显示方向选择(横屏/竖屏/方形)和时长选择(10s/15s/25s) + +#### Scenario: 图片模型隐藏视频参数 +- **WHEN** 用户选择图片模型(如 gpt-image-1) +- **THEN** 创作栏 SHALL 隐藏方向选择和时长选择 + +#### Scenario: 参考图上传 +- **WHEN** 用户点击图片上传按钮 +- **THEN** 系统 SHALL 允许上传参考图片,作为生成输入的 `image_url` + +### Requirement: 生成页面 - 发起生成 +系统 SHALL 通过底部创作栏的"生成"按钮发起 Sora 生成请求。 + +#### Scenario: 发起视频生成 +- **WHEN** 用户填写提示词并点击"生成"按钮 +- **AND** 当前选择视频模型 +- **THEN** 系统 SHALL 发送 `POST /api/v1/sora/generate`,包含 `prompt`、`model`、`media_type=video`、方向、时长参数 +- **AND** 页面 SHALL 显示新的进度卡片(pending 状态) + +#### Scenario: 发起图片生成 +- **WHEN** 用户填写提示词并选择图片模型后点击"生成" +- **THEN** 系统 SHALL 发送生成请求,`media_type=image` +- **AND** 页面 SHALL 显示新的进度卡片 + +#### Scenario: 配额不足预防与提示 +- **WHEN** 用户配额使用率超过 90% +- **THEN** 配额进度条 SHALL 变为黄色警告色,提示"存储空间即将用完" +- **AND** 配额使用率达 100% 时,"生成"按钮 SHALL 禁用并显示 tooltip "存储配额已满" + +#### Scenario: 配额不足错误引导 +- **WHEN** 生成请求返回 HTTP 429(配额不足) +- **THEN** 页面 SHALL 弹出配额不足对话框,包含: + - 当前配额使用详情(已用 / 总配额) + - 引导文案"您可以在作品库中删除不需要的作品来释放存储空间" + - "前往作品库"按钮(直接跳转到作品库页面) + +### Requirement: 生成页面 - 进度展示 +系统 SHALL 在生成页中间区域实时展示当前生成任务的进度状态。 + +#### Scenario: 排队中状态 +- **WHEN** 生成记录 `status = 'pending'` +- **THEN** 进度卡片 SHALL 
显示"排队中"状态、灰色状态指示、提示词摘要(前 50 字) +- **AND** SHALL 显示"取消"按钮 + +#### Scenario: 生成中状态 +- **WHEN** 生成记录 `status = 'generating'` +- **THEN** 进度卡片 SHALL 显示"生成中"动画、提示词预览 +- **AND** SHALL 显示已等待时长(如"已等待 3:42")和预估剩余时间(如"预计剩余 8 分钟") +- **AND** SHALL 显示"取消"按钮 +- **AND** 超过 20 分钟未完成时 SHALL 显示"生成时间异常,建议取消重试" + +#### Scenario: 生成完成 - 自动保存成功 +- **WHEN** 生成记录 `status = 'completed'` 且 `storage_type = 's3'` +- **THEN** 进度卡片 SHALL 显示生成结果预览(视频播放器或图片缩略图) +- **AND** SHALL 显示 "✓ 已保存到云端" 状态标识 +- **AND** SHALL 提供"📥 本地下载"按钮 +- **AND** 作品自动出现在作品库中 + +#### Scenario: 生成完成 - 降级本地存储 +- **WHEN** 生成记录 `status = 'completed'` 且 `storage_type = 'local'` +- **THEN** 进度卡片 SHALL 显示 "✓ 已保存到本地" 状态标识 +- **AND** SHALL 提供"📥 本地下载"按钮 + +#### Scenario: 生成完成 - 无存储(upstream) +- **WHEN** 生成记录 `status = 'completed'` 且 `storage_type = 'upstream'` +- **THEN** 进度卡片 SHALL 显示"📥 本地下载"按钮 +- **AND** SHALL 显示 15 分钟过期倒计时进度条(基于 `completed_at` 计算) +- **AND** 若 S3 当前可用,SHALL 显示可点击的"☁️ 保存到存储"按钮 +- **AND** 若 S3 不可用,"☁️ 保存到存储"按钮 SHALL 禁用并 tooltip "管理员未开通云存储" +- **AND** 倒计时结束后 SHALL 禁用所有按钮并显示"链接已过期" + +#### Scenario: 生成失败状态 +- **WHEN** 生成记录 `status = 'failed'` +- **THEN** 进度卡片 SHALL 显示分类错误信息: + - 上游服务错误 → "服务暂时不可用,建议稍后重试" + - 内容审核不通过 → "提示词包含不支持的内容,请修改后重试" + - 超时 → "生成超时,建议降低分辨率或时长后重试" +- **AND** SHALL 提供"重试"按钮(一键以相同参数重新发起) +- **AND** SHALL 提供"编辑后重试"按钮(将参数回填到创作栏) +- **AND** SHALL 提供"删除"按钮 + +#### Scenario: 任务取消状态 +- **WHEN** 生成记录 `status = 'cancelled'` +- **THEN** 进度卡片 SHALL 显示"已取消"灰色状态 +- **AND** SHALL 提供"重新生成"和"删除"按钮 + +### Requirement: 生成页面 - 多任务管理与状态恢复 +系统 SHALL 支持多个并发生成任务的展示和页面刷新后的状态恢复。 + +#### Scenario: 多任务并发展示 +- **WHEN** 用户有多个进行中或刚完成的生成任务 +- **THEN** 生成页中间区域 SHALL 以时间线方式纵向排列所有任务卡片,最新在最上方 +- **AND** 底部创作栏 SHALL 显示当前活跃任务数(如"正在生成 2/3") +- **AND** 超过并发上限(3 个)时,"生成"按钮 SHALL 禁用并提示"请等待当前任务完成" + +#### Scenario: 页面刷新后恢复任务 +- **WHEN** 用户刷新页面或重新进入 Sora 客户端 +- **THEN** 系统 SHALL 调用 `GET /api/v1/sora/generations?status=pending,generating` 获取进行中任务 +- **AND** SHALL 自动恢复所有进度卡片的显示 +- **AND** SHALL 继续对进行中任务执行轮询 + +#### Scenario: 前端轮询策略 +- 
**WHEN** 存在 pending 或 generating 状态的任务 +- **THEN** 前端 SHALL 按递减频率轮询 `GET /api/v1/sora/generations/:id`: + - 0-2 分钟:每 3 秒 + - 2-10 分钟:每 10 秒 + - 10-30 分钟:每 30 秒 +- **AND** 每次轮询结果 SHALL 更新卡片显示 +- **AND** 卡片上 SHALL 显示"最后更新:N 秒前"以确认数据实时性 + +#### Scenario: 浏览器通知 +- **WHEN** 生成任务完成或失败 +- **AND** 浏览器标签页不在前台 +- **THEN** 系统 SHALL 通过 Notification API 发送桌面通知 +- **AND** 标签页 title SHALL 闪烁提示(如"(1) ✓ 生成完成 - Sora") + +### Requirement: 生成页面 - 无存储提醒 +系统 SHALL 在未配置存储时显示醒目提示。 + +#### Scenario: 无存储警告 +- **WHEN** 用户进入生成页 +- **AND** S3 和本地存储均未配置 +- **THEN** 创作栏 SHALL 显示警告标签"存储未配置,生成后请立即下载" + +#### Scenario: S3 可用时自动保存(正常模式) +- **WHEN** 管理员已开通 S3 存储 +- **AND** 用户存储配额未超限 +- **THEN** 生成完成后系统 SHALL 自动上传到 S3 +- **AND** 卡片 SHALL 显示"✓ 已保存到云端" + +#### Scenario: S3 不可用时的降级提示 +- **WHEN** 管理员未开通 S3 存储(`sora_s3_enabled = false`) +- **THEN** 生成完成后卡片 SHALL 仅显示"📥 本地下载"按钮 +- **AND** "☁️ 保存到存储"按钮 SHALL 禁用并 tooltip "管理员未开通云存储" + +#### Scenario: 手动保存到存储(仅 upstream 记录) +- **WHEN** 生成记录 `storage_type = 'upstream'` 且 S3 当前可用 +- **THEN** "☁️ 保存到存储"按钮 SHALL 可点击 +- **AND** 点击后 SHALL 调用 `POST /api/v1/sora/generations/:id/save` +- **AND** 上传过程中按钮 SHALL 显示 loading 状态 +- **AND** 上传成功后按钮 SHALL 变为"✓ 已保存" +- **AND** 上传失败 SHALL 显示错误信息并允许重试 + +#### Scenario: 无存储生成完成自动提示下载 +- **WHEN** 生成完成且 `storage_type = 'upstream'` +- **THEN** 客户端 SHALL 弹出提醒弹窗"文件仅临时保存,请在 15 分钟内下载" +- **AND** SHALL 显示 15 分钟倒计时 + +#### Scenario: 离开页面未下载警告 +- **WHEN** 存在 `storage_type = 'upstream'` 且未过期的完成记录 +- **AND** 用户尝试离开或关闭页面 +- **THEN** 系统 SHALL 触发 `beforeunload` 事件警告"您有未下载的生成结果,离开后可能丢失" + +### Requirement: 作品库页面 - 网格展示 +系统 SHALL 在作品库页面以网格布局展示用户的历史生成作品。 + +#### Scenario: 作品网格显示 +- **WHEN** 用户切换到"作品库" Tab +- **THEN** 系统 SHALL 从 `GET /api/v1/sora/generations?storage_type=s3,local` 获取已保存记录 +- **AND** SHALL 以响应式网格展示作品卡片(桌面 4 列、平板 3 列、移动端 1-2 列) +- **AND** `storage_type = 'upstream'` 或 `'none'` 的记录 SHALL 不在作品库中显示 +- **AND** S3 作品的 URL SHALL 由后端每次请求时动态生成(避免预签名过期) + +#### Scenario: 作品卡片信息 +- **WHEN** 作品卡片渲染 +- **THEN** 每张卡片 SHALL 
显示:缩略图/视频预览、类型角标(VIDEO/IMAGE)、模型名称、生成时间 +- **AND** 视频卡片 SHALL 显示播放按钮和时长标签 + +#### Scenario: 卡片 hover 操作 +- **WHEN** 用户 hover 作品卡片 +- **THEN** SHALL 显示"📥 本地下载"和"🗑 删除"操作按钮 +- **AND** 缩略图 SHALL 轻微放大效果(scale 1.05,transition 0.2s) + +### Requirement: 作品库页面 - 筛选 +系统 SHALL 支持按类型筛选作品。 + +#### Scenario: 全部/视频/图片筛选 +- **WHEN** 用户点击筛选按钮(全部/视频/图片) +- **THEN** 作品网格 SHALL 只显示对应类型的记录 +- **AND** SHALL 更新显示作品数量 + +#### Scenario: 空状态 +- **WHEN** 筛选结果为空或用户无任何生成记录 +- **THEN** 页面 SHALL 显示空状态引导(图标 + "暂无作品" + "开始创作"按钮) + +### Requirement: 作品详情与操作 +系统 SHALL 支持查看作品详情和执行下载、删除操作。 + +#### Scenario: 查看作品详情 +- **WHEN** 用户点击作品卡片 +- **THEN** 系统 SHALL 弹出预览弹窗,显示完整的媒体内容、提示词、模型信息、生成时间 + +#### Scenario: 本地下载作品 +- **WHEN** 用户点击"本地下载"按钮 +- **THEN** 系统 SHALL 触发浏览器下载对应媒体文件 + +#### Scenario: 保存作品到存储 +- **WHEN** 用户点击"保存到存储"按钮 +- **AND** 管理员已开通 S3 存储 +- **THEN** 系统 SHALL 将媒体文件上传到 S3 +- **AND** 更新生成记录的 `storage_type`、`s3_object_keys` +- **AND** 累加用户存储配额 + +#### Scenario: 删除作品 +- **WHEN** 用户点击删除按钮 +- **THEN** 系统 SHALL 弹出确认对话框 +- **AND** 确认后调用 `DELETE /api/v1/sora/generations/:id` +- **AND** 删除成功后 SHALL 从网格中移除卡片并更新配额显示 + +### Requirement: 暗色主题设计 +系统 SHALL 采用参考 Sora 官方客户端的暗色主题设计。 + +#### Scenario: 暗色主题样式 +- **WHEN** 用户访问 Sora 客户端页面 +- **THEN** 页面背景 SHALL 为深黑色(`#0D0D0D`) +- **AND** 文字 SHALL 为白色/浅灰色 +- **AND** 卡片和输入框 SHALL 使用多层次灰色(`#1A1A1A`、`#242424`、`#2A2A2A`) +- **AND** 导航栏 SHALL 有毛玻璃效果(`backdrop-filter: blur`) + +### Requirement: 响应式布局 +系统 SHALL 支持不同屏幕尺寸下的自适应布局。 + +#### Scenario: 桌面端布局 +- **WHEN** 屏幕宽度 > 1200px +- **THEN** 作品网格 SHALL 显示 4 列 + +#### Scenario: 平板端布局 +- **WHEN** 屏幕宽度 900px - 1200px +- **THEN** 作品网格 SHALL 调整为 3 列 + +#### Scenario: 移动端布局 +- **WHEN** 屏幕宽度 < 600px +- **THEN** 作品网格 SHALL 调整为 1-2 列 + +### Requirement: 国际化支持 +系统 SHALL 为 Sora 客户端所有文案提供中英文国际化支持。 + +#### Scenario: 中文环境 +- **WHEN** 系统语言设置为中文 +- **THEN** 所有 Sora 客户端文案 SHALL 显示中文 + +#### Scenario: 英文环境 +- **WHEN** 系统语言设置为英文 +- **THEN** 所有 Sora 客户端文案 SHALL 显示英文 diff --git 
a/openspec/changes/sora-client-s3-storage/specs/sora-generation-gateway/spec.md b/openspec/changes/sora-client-s3-storage/specs/sora-generation-gateway/spec.md new file mode 100644 index 000000000..b6574ab28 --- /dev/null +++ b/openspec/changes/sora-client-s3-storage/specs/sora-generation-gateway/spec.md @@ -0,0 +1,129 @@ +## MODIFIED Requirements + +### Requirement: Sora 生成网关入口 +系统 SHALL 提供 `POST /v1/chat/completions` 作为 Sora 生成入口(仅限 `platform=sora` 分组)。 + +#### Scenario: Sora 分组调用 /v1/chat/completions +- **WHEN** 请求的 API Key 分组平台为 `sora` +- **AND** 请求体包含 `model` 与 `messages` +- **THEN** 网关按 Sora 规则处理并返回流式或非流式结果 +- **AND** 若生成需要流式,网关 SHALL 强制 `stream=true` 或返回明确提示 + +#### Scenario: Sora 专用路由调用 /sora/v1/chat/completions +- **WHEN** 客户端请求 `POST /sora/v1/chat/completions` +- **THEN** 网关 SHALL 强制使用 `platform=sora` 的调度与生成逻辑 + +#### Scenario: 非流式请求策略 +- **WHEN** 客户端请求 `stream=false` +- **THEN** 网关 SHALL 选择"强制流式并聚合"或"返回明确错误",并在文档中一致说明 +- **AND** 默认策略 SHALL 为"强制流式并聚合" + +#### Scenario: 非 Sora 分组调用 /v1/chat/completions +- **WHEN** 请求的 API Key 分组平台不为 `sora` +- **THEN** 网关 SHALL 返回 4xx 并提示不支持该平台 + +#### Scenario: API Key 直接调用不存储不记录 +- **WHEN** 请求通过 `/sora/v1/chat/completions`(API Key 直接调用路径) +- **THEN** 网关 SHALL 不将媒体文件上传到 S3 +- **AND** SHALL 不执行本地磁盘媒体落盘 +- **AND** SHALL 不写入 `sora_generations` 表 +- **AND** SHALL 不检查存储配额 +- **AND** SHALL 直接返回上游 URL(保持现有行为) + +### Requirement: Sora 调度与失败切换 +系统 SHALL 对 Sora 账号执行调度、并发控制、失败切换,与 OpenAI 调度一致。 + +#### Scenario: 账号可用时成功调度 +- **WHEN** 至少存在一个可调度的 Sora 账号 +- **THEN** 选择优先级最高且最近未使用的账号,并在完成后刷新 LRU + +#### Scenario: 上游失败触发切换 +- **WHEN** 上游返回 401/403/429/5xx +- **THEN** 网关 SHALL 切换账号并重试,直到达到最大切换次数 + +#### Scenario: apikey 类型账号调度到 HTTP 透传 +- **WHEN** 调度选中的 Sora 账号 `type = 'apikey'` 且 `base_url` 非空 +- **THEN** 网关 SHALL 调用 `forwardToUpstream()` 执行 HTTP 透传 +- **AND** SHALL 不使用 `SoraSDKClient` 直连 + +## ADDED Requirements + +### Requirement: Sora 客户端生成入口 +系统 SHALL 提供 `POST /api/v1/sora/generate` 作为客户端 UI 专用生成入口。 + +#### Scenario: 客户端 UI 调用生成 +- 
**WHEN** 用户通过 Sora 客户端 UI 发起生成请求 +- **THEN** 系统 SHALL 接受请求并内部调用现有 `SoraGatewayService.Forward()` 完成生成 +- **AND** 在上层包装存储/记录/配额逻辑 + +#### Scenario: 客户端生成流程(异步) +- **WHEN** `POST /api/v1/sora/generate` 收到请求 +- **THEN** 系统 SHALL 按以下顺序执行: + 1. 检查存储配额(有效配额 > 0 时) + 2. 检查用户当前 pending+generating 任务数不超过 3 + 3. 创建 `sora_generations` 记录(status=pending) + 4. **立即返回** `{ generation_id, status: "pending" }` 给前端 + 5. 后台异步:内部调用 `SoraGatewayService.Forward()` 获取上游媒体 URL(不在该步骤落盘) + 6. 后台异步:自动上传媒体到 S3(若可用),否则降级到本地/上游 URL + 7. 后台异步:更新生成记录(status、media_url、storage_type、file_size 等) + 8. 后台异步:累加存储配额(仅 S3/本地存储时) + +#### Scenario: 前端轮询生成状态 +- **WHEN** 前端需要获取生成任务最新状态 +- **THEN** 系统 SHALL 通过 `GET /api/v1/sora/generations/:id` 返回完整记录 +- **AND** 前端 SHALL 按递减频率轮询(3s → 10s → 30s) + +#### Scenario: 并发生成上限 +- **WHEN** 用户 pending+generating 状态的任务已达 3 个 +- **THEN** 系统 SHALL 返回 HTTP 429 + "请等待当前任务完成后再发起新任务" + +### Requirement: Sora 可用模型列表 API +系统 SHALL 提供 `GET /api/v1/sora/models` 供客户端 UI 获取可用模型。 + +#### Scenario: 获取可用 Sora 模型 +- **WHEN** 用户请求 `GET /api/v1/sora/models` +- **THEN** 系统 SHALL 返回系统内置的 Sora 模型列表 +- **AND** 每个模型 SHALL 包含 `id`、`name`、`media_type`(video/image)、`description` + +### Requirement: 手动保存到存储 +系统 SHALL 提供 `POST /api/v1/sora/generations/:id/save` 供用户将未自动保存的作品手动上传到 S3。 + +#### Scenario: 手动保存 upstream 记录到 S3 +- **WHEN** 用户请求 `POST /api/v1/sora/generations/:id/save` +- **AND** 该记录 `storage_type = 'upstream'` 且 `media_url` 未过期 +- **AND** S3 存储当前可用 +- **THEN** 系统 SHALL 从 `media_url` 下载媒体并上传到 S3 +- **AND** 更新记录 `storage_type = 's3'`、`s3_object_keys`、`file_size_bytes` +- **AND** 累加用户存储配额 + +#### Scenario: 手动保存时 URL 已过期 +- **WHEN** 上游 URL 已过期(下载返回 403/404) +- **THEN** 系统 SHALL 返回 HTTP 410 + "媒体链接已过期,无法保存" + +#### Scenario: 手动保存时 S3 不可用 +- **WHEN** S3 存储未启用或配置不完整 +- **THEN** 系统 SHALL 返回 HTTP 503 + "云存储未配置,请联系管理员" + +### Requirement: 取消生成任务 +系统 SHALL 提供 `POST /api/v1/sora/generations/:id/cancel` 供用户取消进行中的生成任务。 + +#### Scenario: 取消 pending/generating 状态的任务 +- **WHEN** 用户请求 `POST 
/api/v1/sora/generations/:id/cancel` +- **AND** 该记录 `status` 为 `pending` 或 `generating` +- **THEN** 系统 SHALL 将记录状态更新为 `cancelled` +- **AND** SHALL 不累加任何存储配额 +- **AND** 若上游任务已提交,后续返回的结果 SHALL 被忽略 + +#### Scenario: 取消非活跃状态的任务 +- **WHEN** 该记录 `status` 为 `completed`、`failed` 或 `cancelled` +- **THEN** 系统 SHALL 返回 HTTP 409 + "任务已结束,无法取消" + +### Requirement: 存储状态查询 +系统 SHALL 提供 `GET /api/v1/sora/storage-status` 供前端查询当前存储可用性。 + +#### Scenario: 查询存储状态 +- **WHEN** 用户请求 `GET /api/v1/sora/storage-status` +- **THEN** 系统 SHALL 返回 `{ s3_enabled, s3_healthy, local_enabled }` +- **AND** `s3_enabled` 表示管理员是否启用 S3 +- **AND** `s3_healthy` 表示 S3 客户端是否初始化成功 +- **AND** `local_enabled` 表示本地存储是否可用 diff --git a/openspec/changes/sora-client-s3-storage/specs/sora-generation-history/spec.md b/openspec/changes/sora-client-s3-storage/specs/sora-generation-history/spec.md new file mode 100644 index 000000000..5a36554c1 --- /dev/null +++ b/openspec/changes/sora-client-s3-storage/specs/sora-generation-history/spec.md @@ -0,0 +1,138 @@ +## ADDED Requirements + +### Requirement: 生成记录数据模型 +系统 SHALL 新建 `sora_generations` 表存储每次 Sora 客户端 UI 生成的元数据。 + +#### Scenario: 数据库表创建 +- **WHEN** 数据库迁移执行 +- **THEN** 系统 SHALL 创建 `sora_generations` 表,包含以下字段: + - `id` (BIGSERIAL PRIMARY KEY) + - `user_id` (BIGINT NOT NULL, FK → users.id ON DELETE CASCADE) + - `api_key_id` (BIGINT, 可空) + - `model` (VARCHAR(64) NOT NULL) + - `prompt` (TEXT NOT NULL DEFAULT '') + - `media_type` (VARCHAR(16) NOT NULL DEFAULT 'video') + - `status` (VARCHAR(16) NOT NULL DEFAULT 'pending') + - `media_url` (TEXT NOT NULL DEFAULT '') + - `media_urls` (JSONB, 多图 URL 数组) + - `file_size_bytes` (BIGINT NOT NULL DEFAULT 0) + - `storage_type` (VARCHAR(16) NOT NULL DEFAULT 'none') + - `s3_object_keys` (JSONB, S3 object key 数组) + - `upstream_task_id` (VARCHAR(128) NOT NULL DEFAULT '') + - `error_message` (TEXT NOT NULL DEFAULT '') + - `created_at` (TIMESTAMPTZ NOT NULL DEFAULT NOW()) + - `completed_at` (TIMESTAMPTZ) +- **AND** SHALL 创建 `(user_id, 
created_at DESC)` 普通索引(非唯一) +- **AND** SHALL 创建 `(user_id, status)` 索引 + +### Requirement: 创建生成记录 +系统 SHALL 在客户端 UI 发起生成时创建记录,并在生成过程中更新状态。 + +#### Scenario: 发起生成时创建 pending 记录 +- **WHEN** 用户通过 `POST /api/v1/sora/generate` 发起生成 +- **THEN** 系统 SHALL 在 `sora_generations` 中创建一条 `status = 'pending'` 的记录 +- **AND** 记录 SHALL 包含 `user_id`、`model`、`prompt`、`media_type` + +#### Scenario: 上游开始处理时更新为 generating +- **WHEN** 上游开始处理生成任务 +- **THEN** 系统 SHALL 更新记录 `status = 'generating'` +- **AND** 记录 `upstream_task_id` + +#### Scenario: 生成成功时更新为 completed +- **WHEN** 生成完成且媒体文件存储成功 +- **THEN** 系统 SHALL 更新记录 `status = 'completed'` +- **AND** 更新 `media_url`、`media_urls`、`file_size_bytes`、`storage_type`、`s3_object_keys`、`completed_at` + +#### Scenario: 生成失败时更新为 failed +- **WHEN** 生成过程中发生错误 +- **THEN** 系统 SHALL 更新记录 `status = 'failed'` +- **AND** 记录 `error_message` + +#### Scenario: 用户取消生成 +- **WHEN** 用户通过 `POST /api/v1/sora/generations/:id/cancel` 取消任务 +- **AND** 记录状态为 `pending` 或 `generating` +- **THEN** 系统 SHALL 更新记录 `status = 'cancelled'` +- **AND** SHALL 不累加配额 + +#### Scenario: 手动保存到存储后更新 +- **WHEN** 用户对 `storage_type = 'upstream'` 的记录手动触发保存 +- **AND** S3 上传成功 +- **THEN** 系统 SHALL 更新 `storage_type = 's3'`、`s3_object_keys`、`file_size_bytes` +- **AND** 累加存储配额 + +### Requirement: 查询生成历史列表 +系统 SHALL 提供分页查询用户生成历史的 API。 + +#### Scenario: 获取用户生成历史 +- **WHEN** 用户请求 `GET /api/v1/sora/generations` +- **THEN** 系统 SHALL 返回当前用户的生成记录列表,按 `created_at DESC` 排序 +- **AND** 支持分页参数 `page`(默认 1)和 `page_size`(默认 20,最大 100) + +#### Scenario: 按媒体类型筛选 +- **WHEN** 请求携带 `media_type=video` 或 `media_type=image` +- **THEN** 系统 SHALL 只返回对应类型的记录 + +#### Scenario: 按状态筛选 +- **WHEN** 请求携带 `status=completed` +- **THEN** 系统 SHALL 只返回对应状态的记录 + +#### Scenario: 按存储类型筛选(作品库专用) +- **WHEN** 请求携带 `storage_type=s3,local` +- **THEN** 系统 SHALL 返回已持久化存储(S3 或本地)的记录 +- **AND** 作品库页面默认 SHALL 使用 `storage_type=s3,local` 筛选,展示所有已保存的作品 +- **AND** `storage_type='upstream'` 和 `'none'` 的记录 SHALL 不在作品库中显示 + +#### Scenario: 预签名 URL 动态生成 +- 
**WHEN** 返回 `storage_type = 's3'` 的记录列表 +- **AND** 未配置 CDN URL +- **THEN** 系统 SHALL 为每条记录动态生成新的 S3 预签名 URL(24 小时有效) +- **AND** 前端 SHALL 不缓存媒体 URL + +#### Scenario: 恢复进行中的任务 +- **WHEN** 请求携带 `status=pending,generating` +- **THEN** 系统 SHALL 返回用户所有进行中的生成任务 +- **AND** 前端页面加载时 SHALL 调用此接口恢复任务进度显示 + +### Requirement: 查询生成详情 +系统 SHALL 提供查询单条生成记录详情的 API。 + +#### Scenario: 获取生成详情 +- **WHEN** 用户请求 `GET /api/v1/sora/generations/:id` +- **AND** 该记录属于当前用户 +- **THEN** 系统 SHALL 返回完整的生成记录详情 + +#### Scenario: 访问他人记录返回 404 +- **WHEN** 用户请求的生成记录不属于当前用户 +- **THEN** 系统 SHALL 返回 HTTP 404 + +### Requirement: 删除生成记录 +系统 SHALL 提供删除生成记录的 API,并联动清理存储文件和配额。 + +#### Scenario: 删除单条记录 +- **WHEN** 用户请求 `DELETE /api/v1/sora/generations/:id` +- **AND** 该记录属于当前用户 +- **THEN** 系统 SHALL 删除数据库记录 +- **AND** 若 `storage_type = 's3'`,SHALL 删除 S3 文件 +- **AND** 若 `storage_type = 'local'`,SHALL 删除本地文件 +- **AND** SHALL 释放对应的存储配额 + +#### Scenario: 删除不存在的记录 +- **WHEN** 记录不存在或不属于当前用户 +- **THEN** 系统 SHALL 返回 HTTP 404 + +### Requirement: 无存储模式下保留生成历史 +系统 SHALL 在无存储可用时仍记录生成元数据。 + +#### Scenario: 无存储时记录元数据 +- **WHEN** S3 和本地存储均不可用 +- **AND** 客户端 UI 生成完成 +- **THEN** 系统 SHALL 创建生成记录,`storage_type = 'upstream'` +- **AND** `media_url` 为上游临时 URL +- **AND** 系统 SHALL 不累加存储配额 + +#### Scenario: 过期 URL 标记与倒计时 +- **WHEN** 生成记录的 `storage_type = 'upstream'` +- **THEN** 客户端 SHALL 显示 15 分钟倒计时进度条(基于 `completed_at` 计算剩余时间) +- **AND** 剩余 5 分钟时 SHALL 通过浏览器通知提醒用户 +- **AND** 剩余 2 分钟时卡片边框 SHALL 变为红色警告态 +- **AND** 超过 15 分钟后 SHALL 显示"链接已过期,作品无法恢复",禁用下载和保存按钮 diff --git a/openspec/changes/sora-client-s3-storage/specs/sora-s3-media-storage/spec.md b/openspec/changes/sora-client-s3-storage/specs/sora-s3-media-storage/spec.md new file mode 100644 index 000000000..6d226c62c --- /dev/null +++ b/openspec/changes/sora-client-s3-storage/specs/sora-s3-media-storage/spec.md @@ -0,0 +1,104 @@ +## ADDED Requirements + +### Requirement: S3 媒体存储服务初始化 +系统 SHALL 在启动时从系统设置(Settings 表)读取 Sora S3 配置,使用 `aws-sdk-go-v2` 初始化 S3 客户端。 + +#### Scenario: Sora S3 
已启用且配置完整 +- **WHEN** 系统启动或 S3 配置变更 +- **AND** Settings 中 `sora_s3_enabled = true` 且必填字段(endpoint、bucket、access_key_id、secret_access_key)均已配置 +- **THEN** 系统 SHALL 使用 `aws-sdk-go-v2` 初始化 S3 客户端 +- **AND** 系统 SHALL 缓存 S3 客户端实例,标记 S3 存储为可用 + +#### Scenario: Sora S3 未启用或配置不完整 +- **WHEN** 系统启动或 S3 配置变更 +- **AND** `sora_s3_enabled = false` 或缺少必填配置 +- **THEN** 系统 SHALL 标记 S3 存储为不可用 +- **AND** 客户端 UI 调用路径 SHALL 降级为本地存储或即生即下载模式 + +### Requirement: 媒体文件上传到 S3 +系统 SHALL 将 Sora 客户端 UI 生成的媒体文件流式上传到 S3 兼容存储。 + +#### Scenario: 视频文件上传成功 +- **WHEN** Sora 客户端 UI 调用路径生成完成,返回上游媒体 URL +- **AND** S3 存储可用 +- **THEN** 系统 SHALL 使用流式管道(`io.Pipe`)从上游 URL 下载并同时上传到 S3 +- **AND** S3 object key 格式 SHALL 为 `sora/{user_id}/{YYYY/MM/DD}/{uuid}.{ext}` +- **AND** 上传完成后 SHALL 返回 S3 访问 URL(签名 URL 或 CDN URL) +- **AND** 系统 SHALL 记录 `s3_object_keys` 数组到生成记录中(视频为单元素数组) + +#### Scenario: 图片文件上传成功 +- **WHEN** Sora 客户端 UI 生成图片完成 +- **AND** S3 存储可用 +- **THEN** 系统 SHALL 使用与视频相同的上传流程将图片上传到 S3 +- **AND** 支持多图场景(`media_urls` 数组中每个 URL 都上传) + +#### Scenario: S3 上传失败降级 +- **WHEN** S3 上传过程中发生错误(网络超时、权限错误等) +- **THEN** 系统 SHALL 降级到本地磁盘存储(复用现有 `SoraMediaStorage`) +- **AND** 若本地存储也失败,SHALL 降级为返回上游临时 URL +- **AND** 生成记录的 `storage_type` SHALL 反映实际存储位置 + +#### Scenario: 大文件流式上传避免内存溢出 +- **WHEN** 上游媒体文件大于 50MB +- **THEN** 系统 SHALL 使用流式管道上传,不将完整文件缓存到内存 +- **AND** 内存峰值 SHALL 不超过 16MB 缓冲区 + +### Requirement: S3 文件删除 +系统 SHALL 在用户删除生成记录时同步删除 S3 中对应的文件。 + +#### Scenario: 删除 S3 文件(含多图) +- **WHEN** 用户通过作品库删除一条生成记录 +- **AND** 该记录的 `storage_type = 's3'` 且 `s3_object_keys` 非空 +- **THEN** 系统 SHALL 遍历 `s3_object_keys` 数组,逐一调用 S3 DeleteObject 删除所有文件 +- **AND** 释放对应的存储配额(`sora_storage_used_bytes` 减去 `file_size_bytes`) + +#### Scenario: S3 删除失败不阻塞记录删除 +- **WHEN** S3 DeleteObject 调用失败(部分或全部) +- **THEN** 系统 SHALL 仍然删除数据库中的生成记录 +- **AND** 系统 SHALL 记录告警日志,包含失败的 `s3_object_keys` 以便后续清理 + +### Requirement: 三层降级链 +系统 SHALL 支持 S3 → 本地磁盘 → 上游临时 URL 的三层存储降级。 + +#### Scenario: S3 可用时优先使用 S3 +- **WHEN** 客户端 UI 生成完成 +- **AND** S3 存储可用 +- **THEN** 系统 
SHALL 使用 S3 存储,`storage_type = 's3'` + +#### Scenario: S3 不可用时降级到本地 +- **WHEN** 客户端 UI 生成完成 +- **AND** S3 存储不可用但本地存储启用 +- **THEN** 系统 SHALL 使用本地存储,`storage_type = 'local'` + +#### Scenario: 均不可用时透传上游 URL +- **WHEN** 客户端 UI 生成完成 +- **AND** S3 和本地存储均不可用 +- **THEN** 系统 SHALL 直接返回上游临时 URL,`storage_type = 'upstream'` +- **AND** 客户端 SHALL 显示即时下载提示 + +### Requirement: S3 访问 URL 生成策略 +系统 SHALL 为 S3 中的媒体文件按配置生成可访问 URL(CDN 优先,预签名兜底)。 + +#### Scenario: 配置 CDN URL 时返回 CDN 地址 +- **WHEN** 系统设置中配置了 `sora_s3_cdn_url` +- **THEN** 系统 SHALL 返回基于 `sora_s3_cdn_url + object_key` 的访问地址 +- **AND** SHALL 不额外生成预签名 URL + +#### Scenario: 未配置 CDN URL 时生成预签名 URL +- **WHEN** 系统未配置 `sora_s3_cdn_url` +- **THEN** 系统 SHALL 生成 S3 预签名 URL,有效期 SHALL 为 24 小时 +- **AND** URL SHALL 支持直接在浏览器中播放/查看 + +### Requirement: 预签名 URL 动态刷新 +系统 SHALL 在返回 S3 媒体记录时动态生成访问 URL,避免预签名过期导致作品库碎图。 + +#### Scenario: 列表 API 动态生成 URL +- **WHEN** `GET /api/v1/sora/generations` 返回 `storage_type = 's3'` 的记录 +- **AND** 未配置 CDN URL +- **THEN** 后端 SHALL 为每条记录的 `s3_object_keys` 动态生成新的预签名 URL 填充到 `media_url` / `media_urls` +- **AND** 前端 SHALL 不缓存这些 URL + +#### Scenario: 详情 API 动态生成 URL +- **WHEN** `GET /api/v1/sora/generations/:id` 返回 `storage_type = 's3'` 的记录 +- **THEN** 后端 SHALL 动态生成预签名 URL +- **AND** 批量签名性能 SHALL 不影响列表加载速度(使用并发签名或缓存短期 URL) diff --git a/openspec/changes/sora-client-s3-storage/specs/sora-s3-settings/spec.md b/openspec/changes/sora-client-s3-storage/specs/sora-s3-settings/spec.md new file mode 100644 index 000000000..da9aea93f --- /dev/null +++ b/openspec/changes/sora-client-s3-storage/specs/sora-s3-settings/spec.md @@ -0,0 +1,39 @@ +## ADDED Requirements + +### Requirement: Sora S3 存储配置 +系统 SHALL 在系统设置中提供独立的 Sora S3 存储配置,使用 `aws-sdk-go-v2` 直连 S3 兼容存储,不依赖现有数据管理的 gRPC 代理。 + +#### Scenario: 系统设置新增 Sora S3 配置项 +- **WHEN** 管理员访问系统设置页面 +- **THEN** 页面 SHALL 显示"Sora S3 存储配置"区域 +- **AND** 包含以下配置项: + - 启用开关(`sora_s3_enabled`) + - S3 端点(`sora_s3_endpoint`) + - 区域(`sora_s3_region`) + - 存储桶(`sora_s3_bucket`) + - 访问密钥 
ID(`sora_s3_access_key_id`) + - 访问密钥(`sora_s3_secret_access_key`,加密存储,界面显示为密码框) + - 对象键前缀(`sora_s3_prefix`,可选) + - 强制路径模式(`sora_s3_force_path_style`,可选) + - CDN 域名(`sora_s3_cdn_url`,可选) + +#### Scenario: 保存 Sora S3 配置 +- **WHEN** 管理员填写 S3 配置并点击保存 +- **THEN** 系统 SHALL 将配置保存到 Settings 表 +- **AND** `sora_s3_secret_access_key` SHALL 加密存储 +- **AND** Sora S3 Storage Service SHALL 刷新缓存的 S3 客户端配置 + +#### Scenario: 测试 S3 连接 +- **WHEN** 管理员点击"测试连接"按钮 +- **THEN** 系统 SHALL 使用当前表单中的配置创建临时 S3 客户端 +- **AND** 执行 `HeadBucket` 或 `PutObject` + `DeleteObject` 测试连通性 +- **AND** 返回测试结果(成功/失败 + 错误信息) + +#### Scenario: 禁用 Sora S3 存储 +- **WHEN** 管理员关闭 `sora_s3_enabled` 开关 +- **THEN** Sora 客户端 UI 的生成结果 SHALL 降级到本地存储或上游 URL 透传 + +#### Scenario: S3 配置不完整 +- **WHEN** `sora_s3_enabled = true` 但缺少必填字段(endpoint/bucket/access_key_id/secret_access_key) +- **THEN** 系统 SHALL 视为 S3 存储不可用 +- **AND** SHALL 在日志中记录配置不完整的警告 diff --git a/openspec/changes/sora-client-s3-storage/specs/sora-user-storage-quota/spec.md b/openspec/changes/sora-client-s3-storage/specs/sora-user-storage-quota/spec.md new file mode 100644 index 000000000..ae899c87a --- /dev/null +++ b/openspec/changes/sora-client-s3-storage/specs/sora-user-storage-quota/spec.md @@ -0,0 +1,91 @@ +## ADDED Requirements + +### Requirement: 用户存储配额字段 +系统 SHALL 在 `users` 表新增 Sora 存储配额字段,用于追踪每个用户的配额和用量。 + +#### Scenario: 用户表新增配额字段 +- **WHEN** 数据库迁移执行 +- **THEN** `users` 表 SHALL 新增 `sora_storage_quota_bytes BIGINT NOT NULL DEFAULT 0` 字段(0 表示使用系统默认) +- **AND** `users` 表 SHALL 新增 `sora_storage_used_bytes BIGINT NOT NULL DEFAULT 0` 字段 + +### Requirement: 系统默认配额设置 +系统 SHALL 提供全局默认 Sora 存储配额设置,管理员可在系统设置中配置。 + +#### Scenario: 管理员设置全局默认配额 +- **WHEN** 管理员在系统设置中设置 `sora_default_storage_quota_bytes` +- **THEN** 系统 SHALL 将该值保存到 Settings 表 +- **AND** 所有未单独设置配额的用户 SHALL 使用该默认值 + +#### Scenario: 未设置全局默认配额 +- **WHEN** `sora_default_storage_quota_bytes` 未设置或为 0 +- **THEN** 系统 SHALL 不限制用户存储空间(即无配额限制) + +### Requirement: 配额优先级判断 +系统 SHALL 按用户级 → 分组级 → 系统默认的优先级计算有效配额。 + +#### 
Scenario: 用户级配额优先 +- **WHEN** 用户 `sora_storage_quota_bytes > 0` +- **THEN** 有效配额 SHALL 为用户级配额值 + +#### Scenario: 分组级配额次优先 +- **WHEN** 用户 `sora_storage_quota_bytes = 0`(未单独设置) +- **AND** 用户所属分组 `sora_storage_quota_bytes > 0` +- **THEN** 有效配额 SHALL 为分组级配额值 + +#### Scenario: 系统默认配额兜底 +- **WHEN** 用户和分组的配额均未设置(均为 0) +- **THEN** 有效配额 SHALL 为 `settings.sora_default_storage_quota_bytes` + +### Requirement: 生成前配额检查 +系统 SHALL 在客户端 UI 调用路径发起生成前检查存储配额。 + +#### Scenario: 配额充足允许生成 +- **WHEN** 用户发起 Sora 客户端生成请求 +- **AND** `sora_storage_used_bytes < 有效配额` +- **THEN** 系统 SHALL 允许生成请求继续 + +#### Scenario: 配额不足拒绝生成 +- **WHEN** 用户发起 Sora 客户端生成请求 +- **AND** `sora_storage_used_bytes >= 有效配额` +- **AND** 有效配额 > 0 +- **THEN** 系统 SHALL 返回 HTTP 429 错误 +- **AND** 响应 SHALL 包含 `{ quota_bytes, used_bytes, message: "存储配额已满,请删除不需要的作品释放空间" }` +- **AND** 响应 SHALL 包含 `guide: "delete_works"` 字段,前端据此显示引导对话框 + +#### Scenario: 无配额限制时不检查 +- **WHEN** 有效配额 = 0(系统默认也未设置) +- **THEN** 系统 SHALL 跳过配额检查,允许生成 + +### Requirement: 配额原子更新 +系统 SHALL 使用原子操作更新用户已用存储空间,防止并发超额。 + +#### Scenario: 生成完成后累加用量 +- **WHEN** 媒体文件上传到 S3/本地存储成功 +- **THEN** 系统 SHALL 在计算出 `effective_quota` 后执行原子 SQL:`UPDATE users SET sora_storage_used_bytes = sora_storage_used_bytes + :file_size WHERE id = :id AND (:effective_quota = 0 OR sora_storage_used_bytes + :file_size <= :effective_quota)` +- **AND** 若原子更新失败(超额),系统 SHALL 删除已上传的文件并返回配额错误 + +#### Scenario: 删除作品后释放配额 +- **WHEN** 用户删除一条生成记录 +- **AND** 该记录 `file_size_bytes > 0` +- **THEN** 系统 SHALL 执行 `UPDATE users SET sora_storage_used_bytes = sora_storage_used_bytes - file_size WHERE id = ?` +- **AND** `sora_storage_used_bytes` SHALL 不低于 0 + +### Requirement: 配额查询 API +系统 SHALL 提供配额查询接口,用户可查看当前用量和剩余空间。 + +#### Scenario: 查询用户 Sora 配额 +- **WHEN** 用户请求 `GET /api/v1/sora/quota` +- **THEN** 系统 SHALL 返回 `{ quota_bytes, used_bytes, available_bytes, quota_source }` +- **AND** `quota_source` SHALL 标明配额来源("user" / "group" / "system" / "unlimited") + +### Requirement: 管理员配额管理 +管理员 SHALL 可以在用户管理和分组管理中设置 Sora 
存储配额。 + +#### Scenario: 管理员设置单个用户配额 +- **WHEN** 管理员在用户编辑页面设置 Sora 存储配额 +- **THEN** 系统 SHALL 更新 `users.sora_storage_quota_bytes` + +#### Scenario: 管理员设置分组配额 +- **WHEN** 管理员在分组管理中设置 Sora 存储配额 +- **THEN** 系统 SHALL 更新 `groups.sora_storage_quota_bytes` 字段 +- **AND** 该分组下所有未单独设置配额的用户 SHALL 使用分组配额 diff --git a/openspec/changes/sora-client-s3-storage/tasks.md b/openspec/changes/sora-client-s3-storage/tasks.md new file mode 100644 index 000000000..feff095ee --- /dev/null +++ b/openspec/changes/sora-client-s3-storage/tasks.md @@ -0,0 +1,144 @@ +## 1. 数据库迁移 + +- [ ] 1.1 创建 `sora_generations` 表迁移脚本(含 `s3_object_keys JSONB` 数组字段、所有索引、外键约束) +- [ ] 1.2 `users` 表新增 `sora_storage_quota_bytes` 和 `sora_storage_used_bytes` 字段 +- [ ] 1.3 `groups` 表新增 `sora_storage_quota_bytes` 字段 +- [ ] 1.4 系统设置新增 `sora_default_storage_quota_bytes` 键值 + +## 2. Sora S3 存储配置(系统设置) + +- [ ] 2.1 后端:Settings 表新增 Sora S3 配置键值(sora_s3_enabled、sora_s3_endpoint、sora_s3_region、sora_s3_bucket、sora_s3_access_key_id、sora_s3_secret_access_key、sora_s3_prefix、sora_s3_force_path_style、sora_s3_cdn_url) +- [ ] 2.2 后端:系统设置 API 新增 Sora S3 配置读写接口(含 secret_access_key 加密存储) +- [ ] 2.3 后端:新增 Sora S3 连接测试接口(HeadBucket 验证连通性) +- [ ] 2.4 前端:系统设置页面新增"Sora S3 存储配置"区域(启用开关 + S3 连接表单 + 测试连接按钮) + +## 3. Sora API Key 账号类型(sora-account-apikey) + +- [ ] 3.1 前端 `CreateAccountModal.vue`:取消 Sora 平台 OAuth 硬编码限制(第 2597-2601 行) +- [ ] 3.2 前端 `CreateAccountModal.vue`:新增 Sora 平台的"API Key / 上游透传"选项卡和表单(base_url + api_key) +- [ ] 3.3 前端 `EditAccountModal.vue`:支持编辑 Sora apikey 类型账号 +- [ ] 3.4 前端 `credentialsBuilder.ts`:新增 Sora apikey 类型的 credentials 构建逻辑 +- [ ] 3.5 后端 `sora_gateway_service.go`:`Forward()` 方法新增 apikey 类型分支判断 +- [ ] 3.6 后端新增 `sora_upstream_forwarder.go`:实现 `forwardToUpstream()` HTTP 透传方法(流式+非流式) +- [ ] 3.7 后端 apikey 透传错误处理:复用 `UpstreamFailoverError` 机制实现失败转移 +- [ ] 3.8 前端/后端:Sora apikey 账号连通性测试支持 +- [ ] 3.9 前端/后端:Sora apikey 账号 `base_url` 校验(必填 + scheme 合法)与上游 URL 规范化拼接 + +## 4. 
S3 媒体存储服务(sora-s3-media-storage) + +- [ ] 4.1 引入 `aws-sdk-go-v2` 依赖;新增 `service/sora_s3_storage.go`:从 Settings 表读取 S3 配置,初始化 aws-sdk-go-v2 S3 客户端并缓存 +- [ ] 4.2 实现流式上传方法:从上游 URL 下载并通过 `io.Pipe` 流式上传到 S3 +- [ ] 4.3 实现 S3 object key 命名规则:`sora/{user_id}/{YYYY/MM/DD}/{uuid}.{ext}`,多图生成多个 key 存入 `s3_object_keys` JSONB 数组 +- [ ] 4.4 实现 S3 访问 URL 策略:CDN URL 优先,否则动态生成 24h 预签名 URL(列表/详情接口每次请求时重新签名) +- [ ] 4.5 实现 S3 文件删除方法(遍历 `s3_object_keys` 数组逐一删除) +- [ ] 4.6 实现三层降级链逻辑:S3 → 本地(复用 SoraMediaStorage)→ 上游临时 URL +- [ ] 4.7 系统设置中 S3 配置变更时自动刷新缓存的 S3 客户端 + +## 5. 用户存储配额管理(sora-user-storage-quota) + +- [ ] 5.1 新增 `service/sora_quota_service.go`:配额优先级判断逻辑(用户 → 分组 → 系统默认) +- [ ] 5.2 实现配额检查方法:生成前检查存储是否超限 +- [ ] 5.3 实现配额原子更新:上传成功后累加用量,删除后释放用量 +- [ ] 5.4 实现配额查询 API:`GET /api/v1/sora/quota` 返回配额信息 + +## 6. 生成记录管理(sora-generation-history) + +- [ ] 6.1 新增 `service/sora_generation_service.go`:生成记录 CRUD 方法 +- [ ] 6.2 实现创建记录(pending → generating → completed/failed 状态流转) +- [ ] 6.3 实现查询历史列表(分页 + 按类型/状态筛选 + 按创建时间倒序) +- [ ] 6.4 实现查询详情(权限校验:只能查看自己的记录) +- [ ] 6.5 实现删除记录(联动 S3/本地文件清理 + 配额释放) +- [ ] 6.6 无存储模式下记录元数据(storage_type='upstream',不累加配额) + +## 7. 
Sora 客户端 Handler 与路由(sora-generation-gateway) + +- [ ] 7.1 新增 `handler/sora_client_handler.go`:客户端 API Handler +- [ ] 7.2 实现 `POST /api/v1/sora/generate`(异步):配额检查 → 并发数检查(≤3) → 创建 pending 记录 → **立即返回 generation_id** → 后台异步(Forward → 自动上传S3/降级 → 更新记录 → 累加配额) +- [ ] 7.3 实现 `GET /api/v1/sora/generations`:历史列表接口(支持 status/storage_type/media_type 筛选;S3 记录动态生成预签名 URL) +- [ ] 7.4 实现 `GET /api/v1/sora/generations/:id`:详情接口(动态预签名 URL) +- [ ] 7.5 实现 `DELETE /api/v1/sora/generations/:id`:删除接口 +- [ ] 7.6 实现 `GET /api/v1/sora/quota`:配额查询接口 +- [ ] 7.7 实现 `GET /api/v1/sora/models`:可用模型列表接口 +- [ ] 7.8 注册路由:`server/routes/` 新增 `/api/v1/sora/*` 路由组 +- [ ] 7.9 调整 `/sora/v1/chat/completions` 直调路径:保持纯透传,不执行本地/S3 媒体落盘 +- [ ] 7.10 实现 `POST /api/v1/sora/generations/:id/save`:手动保存到 S3(仅 upstream 记录,含 URL 过期检测) +- [ ] 7.11 实现 `POST /api/v1/sora/generations/:id/cancel`:取消生成任务(标记 cancelled,忽略后续结果) +- [ ] 7.12 实现 `GET /api/v1/sora/storage-status`:返回 { s3_enabled, s3_healthy, local_enabled } + +## 8. 管理员配额管理界面 + +- [ ] 8.1 系统设置页面:新增"Sora 默认存储配额"设置项 +- [ ] 8.2 用户管理页面:用户编辑表单新增"Sora 存储配额"字段 +- [ ] 8.3 分组管理页面:分组编辑表单新增"Sora 存储配额"字段 +- [ ] 8.4 后端 API 适配:用户/分组的创建和更新接口支持新增字段 + +## 9. Sora 客户端前端 - 基础框架(sora-client-ui) + +- [ ] 9.1 新增 `views/user/SoraView.vue`:Sora 客户端主页面容器(暗色主题) +- [ ] 9.2 新增 `components/sora/SoraNavBar.vue`:顶部导航栏(Tab 切换 + 配额条 + 用户头像) +- [ ] 9.3 前端路由注册:`router/index.ts` 新增 `/sora` 路由(需登录认证) +- [ ] 9.4 侧边栏菜单:`AppSidebar.vue` 新增 Sora 菜单项 +- [ ] 9.5 API 模块:新增 `api/sora.ts`,封装所有 Sora 客户端 API 调用 + +## 10. 
Sora 客户端前端 - 生成页 + +- [ ] 10.1 新增 `components/sora/SoraGeneratePage.vue`:生成页主容器(多任务时间线布局) +- [ ] 10.2 新增 `components/sora/SoraPromptBar.vue`:底部创作栏(提示词输入 + 参数选择 + 生成按钮 + 活跃任务计数) +- [ ] 10.3 新增 `components/sora/SoraModelSelector.vue`:模型选择下拉(视频/图片分组) +- [ ] 10.4 新增 `components/sora/SoraProgressCard.vue`:生成进度卡片(6 种状态:pending/generating/completed-s3/completed-upstream/failed/cancelled) + - pending/generating:显示已等待时长 + 预估剩余 + 取消按钮 + - completed-s3:显示"✓ 已保存到云端" + 本地下载 + - completed-upstream:显示 15 分钟倒计时 + 本地下载 + 保存到存储 + - failed:分类错误信息 + 重试/编辑后重试/删除 + - cancelled:已取消 + 重新生成/删除 +- [ ] 10.5 新增 `components/sora/SoraNoStorageWarning.vue`:无存储提示组件 +- [ ] 10.6 实现 Ctrl/Cmd + Enter 快捷键触发生成 +- [ ] 10.7 实现图片模型切换时隐藏方向/时长选择 +- [ ] 10.8 实现参考图上传功能 +- [ ] 10.9 实现前端轮询机制:递减频率(3s→10s→30s)轮询 GET /api/v1/sora/generations/:id +- [ ] 10.10 实现页面加载时恢复进行中任务(GET /api/v1/sora/generations?status=pending,generating) +- [ ] 10.11 实现浏览器通知(Notification API):任务完成/失败时通知 + 标签页 title 闪烁 +- [ ] 10.12 实现 beforeunload 警告:存在未下载的 upstream 记录时阻止离开 +- [ ] 10.13 实现 upstream 记录的 15 分钟倒计时 UI(进度条 + 红色警告态) +- [ ] 10.14 实现取消生成功能:调用 POST /api/v1/sora/generations/:id/cancel + 二次确认 + +## 11. Sora 客户端前端 - 作品库页 + +- [ ] 11.1 新增 `components/sora/SoraLibraryPage.vue`:作品库页主容器(请求 storage_type=s3,local 筛选已保存作品) +- [ ] 11.2 新增 `components/sora/SoraLibraryGrid.vue`:响应式网格布局(CSS Grid auto-fill, 4→3→2→1 列) +- [ ] 11.3 新增 `components/sora/SoraMediaCard.vue`:作品卡片(缩略图 + 类型角标 + hover 显示"📥 下载"和"🗑 删除") +- [ ] 11.4 新增 `components/sora/SoraEmptyState.vue`:空状态引导(图标 + "暂无作品" + "开始创作"按钮) +- [ ] 11.5 实现全部/视频/图片筛选功能 +- [ ] 11.6 实现分页加载(滚动加载或按钮加载) + +## 12. Sora 客户端前端 - 弹窗与辅助组件 + +- [ ] 12.1 新增 `components/sora/SoraMediaPreview.vue`:作品详情预览弹窗 +- [ ] 12.2 新增 `components/sora/SoraQuotaBar.vue`:配额进度条组件 +- [ ] 12.3 新增 `components/sora/SoraDownloadDialog.vue`:即时下载弹窗(无存储模式) +- [ ] 12.4 实现视频缩略图:前端用 `
{{ t('admin.dataManagement.history.columns.jobID') }} {{ t('admin.dataManagement.history.columns.type') }} {{ t('admin.dataManagement.history.columns.status') }} {{ t('admin.dataManagement.history.columns.triggeredBy') }}{{ t('admin.dataManagement.history.columns.pgProfile') }}{{ t('admin.dataManagement.history.columns.redisProfile') }}{{ t('admin.dataManagement.history.columns.s3Profile') }} {{ t('admin.dataManagement.history.columns.finishedAt') }} {{ t('admin.dataManagement.history.columns.artifact') }} {{ t('admin.dataManagement.history.columns.error') }} {{ job.triggered_by }}{{ job.postgres_profile_id || '-' }}{{ job.redis_profile_id || '-' }}{{ job.s3_profile_id || '-' }} {{ formatDate(job.finished_at || job.started_at) }}
{{ job.artifact?.local_path || '-' }}
@@ -276,7 +518,7 @@
{{ job.error_message || '-' }}
+ {{ t('admin.dataManagement.history.empty') }}