From 9ddd2ac6094671d7c5c3719b38d13a75419f2aca Mon Sep 17 00:00:00 2001
From: Andrei Vydrin
Date: Tue, 16 Jul 2024 16:47:00 +0700
Subject: [PATCH] feat: add lock_all_projects_before_exec

---
 .../docs/repo-level-atlantis-yaml.md          | 24 +++++
 server/core/config/raw/repo_cfg.go            | 11 +++
 server/core/config/raw/repo_cfg_test.go       | 14 +--
 server/core/config/valid/repo_cfg.go          |  1 +
 server/events/apply_command_runner.go         | 40 +++++++-
 server/events/command/name.go                 |  6 ++
 server/events/command/project_context.go      |  2 +
 server/events/command/project_result.go       |  6 +-
 server/events/command/project_result_test.go  |  7 ++
 .../instrumented_project_command_runner.go    |  5 +
 server/events/markdown_renderer.go            |  4 +
 server/events/markdown_renderer_test.go       | 12 +++
 .../mocks/mock_project_command_runner.go      | 42 ++++++++
 server/events/plan_command_runner.go          | 65 ++++++++++++-
 server/events/project_command_builder.go      | 10 ++
 .../events/project_command_context_builder.go | 17 +++-
 .../project_command_context_builder_test.go   |  8 +-
 server/events/project_command_runner.go       | 96 ++++++++++---------
 18 files changed, 307 insertions(+), 63 deletions(-)

diff --git a/runatlantis.io/docs/repo-level-atlantis-yaml.md b/runatlantis.io/docs/repo-level-atlantis-yaml.md
index 11feb31224..36dd3a0e24 100644
--- a/runatlantis.io/docs/repo-level-atlantis-yaml.md
+++ b/runatlantis.io/docs/repo-level-atlantis-yaml.md
@@ -61,6 +61,7 @@ delete_source_branch_on_merge: true
 parallel_plan: true
 parallel_apply: true
 abort_on_execution_order_fail: true
+lock_all_projects_before_exec: true
 projects:
 - name: my-project-name
   branch: /main/
@@ -394,6 +395,25 @@ it's still desirable for Atlantis to plan/apply for projects not enumerated in t
 
 See [Custom Workflow Use Cases: Custom Backend Config](custom-workflows.md#custom-backend-config)
 
+### Lock all changed projects before plan/apply
+
+```yaml
+lock_all_projects_before_exec: true
+```
+
+By default, Atlantis acquires the lock for a project immediately before running `plan` on it
+(this section uses `plan` throughout, but the option works the same way for `apply` locking).
+For example, for a pull request that changes projects `project1` and `project2`,
+the locking sequence is as follows (parallel planning is disabled here for clarity):
+```
+lock project1 -> plan project1 -> lock project2 (this lock may fail) -> plan project2
+```
+With this option enabled, Atlantis locks every changed project before running `plan` on any of them.
+In the same example, the sequence becomes:
+```
+lock project1 -> lock project2 (either all locks are acquired, or the run stops here) -> plan project1 -> plan project2
+```
+
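+Conceptually, the two modes differ only in when locks are acquired relative to
+planning. The sketch below is illustrative Go-style pseudocode, not Atlantis
+code: `lock`, `plan`, and `unlockAll` are hypothetical helpers.
+
+```go
+// Default behavior: lock each project immediately before planning it.
+// A lock failure can therefore happen after earlier projects were planned.
+for _, p := range changedProjects {
+	if err := lock(p); err != nil {
+		return err // project1 may already be planned at this point
+	}
+	plan(p)
+}
+
+// With lock_all_projects_before_exec: acquire every lock up front, so a
+// lock failure aborts the run before any plan output is produced.
+for _, p := range changedProjects {
+	if err := lock(p); err != nil {
+		unlockAll(changedProjects) // release the locks acquired so far
+		return err
+	}
+}
+for _, p := range changedProjects {
+	plan(p)
+}
+```
+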
 ## Reference
 
 ### Top-Level Keys
 
 ```yaml
 version: 3
 automerge: false
 delete_source_branch_on_merge: false
 projects:
 workflows:
 allowed_regexp_prefixes:
+abort_on_execution_order_fail: false
+lock_all_projects_before_exec: false
 ```
 
 | Key | Type | Default | Required | Description |
 | projects | array[[Project](repo-level-atlantis-yaml.md#project)] | `[]` | no | Lists the projects in this repo. |
 | workflows *(restricted)* | map[string: [Workflow](custom-workflows.md#reference)] | `{}` | no | Custom workflows. |
 | allowed_regexp_prefixes | array\[string\] | `[]` | no | Lists the allowed regexp prefixes to use when the [`--enable-regexp-cmd`](server-configuration.md#enable-regexp-cmd) flag is used. |
+| abort_on_execution_order_fail | bool | `false` | no | Stops the remaining execution groups if any group fails. |
+| lock_all_projects_before_exec | bool | `false` | no | Acquires locks on all changed projects before planning/applying any of them. |
 
 ### Project
 
diff --git a/server/core/config/raw/repo_cfg.go b/server/core/config/raw/repo_cfg.go
index 9aa18c7733..427e4a94bf 100644
--- a/server/core/config/raw/repo_cfg.go
+++ b/server/core/config/raw/repo_cfg.go
@@ -13,6 +13,9 @@ const DefaultEmojiReaction = ""
 // DefaultAbortOnExcecutionOrderFail being false is the default setting for abort on execution group failiures
 const DefaultAbortOnExcecutionOrderFail = false
 
+// DefaultLockAllProjectsBeforeExec being false is the default setting for locking all projects before plan/apply
+const DefaultLockAllProjectsBeforeExec = false
+
 // RepoCfg is the raw schema for repo-level atlantis.yaml config.
 type RepoCfg struct {
 	Version *int `yaml:"version,omitempty"`
@@ -29,6 +32,7 @@ type RepoCfg struct {
 	AbortOnExcecutionOrderFail *bool      `yaml:"abort_on_execution_order_fail,omitempty"`
 	RepoLocks                  *RepoLocks `yaml:"repo_locks,omitempty"`
 	SilencePRComments          []string   `yaml:"silence_pr_comments,omitempty"`
+	LockAllProjectsBeforeExec  *bool      `yaml:"lock_all_projects_before_exec,omitempty"`
 }
 
 func (r RepoCfg) Validate() error {
@@ -83,6 +87,12 @@ func (r RepoCfg) ToValid() valid.RepoCfg {
 	if r.RepoLocks != nil {
 		repoLocks = r.RepoLocks.ToValid()
 	}
+
+	lockAllProjectsBeforeExec := DefaultLockAllProjectsBeforeExec
+	if r.LockAllProjectsBeforeExec != nil {
+		lockAllProjectsBeforeExec = *r.LockAllProjectsBeforeExec
+	}
+
 	return valid.RepoCfg{
 		Version:  *r.Version,
 		Projects: validProjects,
@@ -98,5 +108,6 @@ func (r RepoCfg) ToValid() valid.RepoCfg {
 		AbortOnExcecutionOrderFail: abortOnExcecutionOrderFail,
 		RepoLocks:                  repoLocks,
 		SilencePRComments:          r.SilencePRComments,
+		LockAllProjectsBeforeExec:  lockAllProjectsBeforeExec,
 	}
 }
diff --git a/server/core/config/raw/repo_cfg_test.go b/server/core/config/raw/repo_cfg_test.go
index b3844ee68c..588e204639 100644
--- a/server/core/config/raw/repo_cfg_test.go
+++ b/server/core/config/raw/repo_cfg_test.go
@@ -134,6 +134,7 @@ parallel_apply: true
 parallel_plan: false
 repo_locks:
   mode: on_apply
+lock_all_projects_before_exec: true
 projects:
 - dir: mydir
   workspace: myworkspace
@@ -157,12 +158,13 @@ allowed_regexp_prefixes:
 - dev/
 - staging/`,
 			exp: raw.RepoCfg{
-				Version:       Int(3),
-				AutoDiscover:  &raw.AutoDiscover{Mode: &autoDiscoverEnabled},
-				Automerge:     Bool(true),
-				ParallelApply: Bool(true),
-				ParallelPlan:  Bool(false),
-				RepoLocks:     &raw.RepoLocks{Mode: &repoLocksOnApply},
+				Version:                   Int(3),
+				AutoDiscover:              &raw.AutoDiscover{Mode: &autoDiscoverEnabled},
+				Automerge:                 Bool(true),
+				ParallelApply:             Bool(true),
+				ParallelPlan:              Bool(false),
+				RepoLocks:                 &raw.RepoLocks{Mode: &repoLocksOnApply},
+				LockAllProjectsBeforeExec: Bool(true),
 				Projects: []raw.Project{
 					{
 						Dir:       String("mydir"),
diff --git a/server/core/config/valid/repo_cfg.go b/server/core/config/valid/repo_cfg.go
index e5a8378bd7..7070a5eea1 100644
--- a/server/core/config/valid/repo_cfg.go
+++ b/server/core/config/valid/repo_cfg.go
@@ -30,6 +30,7 @@ type RepoCfg struct {
 	AllowedRegexpPrefixes      []string
 	AbortOnExcecutionOrderFail bool
 	SilencePRComments          []string
+	LockAllProjectsBeforeExec  bool
 }
 
 func (r RepoCfg) FindProjectsByDirWorkspace(repoRelDir string, workspace string) []Project {
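The raw schema keeps optional keys as pointers so that "key absent" and "key set to the zero value" remain distinguishable, and `ToValid` then collapses the pointer to a concrete bool. A minimal self-contained sketch of that convention — `RawCfg` and `ValidCfg` below are simplified stand-ins, not the real `raw.RepoCfg`/`valid.RepoCfg`:

```go
package main

import "fmt"

// DefaultLockAllProjectsBeforeExec mirrors the package-level default above.
const DefaultLockAllProjectsBeforeExec = false

// RawCfg stands in for the raw schema: a nil pointer means "key absent".
type RawCfg struct {
	LockAllProjectsBeforeExec *bool `yaml:"lock_all_projects_before_exec,omitempty"`
}

// ValidCfg stands in for the validated config with a concrete value.
type ValidCfg struct {
	LockAllProjectsBeforeExec bool
}

func (r RawCfg) ToValid() ValidCfg {
	v := DefaultLockAllProjectsBeforeExec
	if r.LockAllProjectsBeforeExec != nil {
		v = *r.LockAllProjectsBeforeExec // an explicit value overrides the default
	}
	return ValidCfg{LockAllProjectsBeforeExec: v}
}

func main() {
	enabled := true
	fmt.Println(RawCfg{}.ToValid())                                    // {false}: key absent
	fmt.Println(RawCfg{LockAllProjectsBeforeExec: &enabled}.ToValid()) // {true}: key set
}
```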
diff --git a/server/events/apply_command_runner.go b/server/events/apply_command_runner.go
index ee6bf8ab1f..f207143316 100644
--- a/server/events/apply_command_runner.go
+++ b/server/events/apply_command_runner.go
@@ -7,13 +7,18 @@ import (
 	"github.com/runatlantis/atlantis/server/events/vcs"
 )
 
+type ProjectCommandRunnerForApply interface {
+	ProjectApplyCommandRunner
+	ProjectLockCommandRunner
+}
+
 func NewApplyCommandRunner(
 	vcsClient vcs.Client,
 	disableApplyAll bool,
 	applyCommandLocker locking.ApplyLockChecker,
 	commitStatusUpdater CommitStatusUpdater,
 	prjCommandBuilder ProjectApplyCommandBuilder,
-	prjCmdRunner ProjectApplyCommandRunner,
+	prjCmdRunner ProjectCommandRunnerForApply,
 	autoMerger *AutoMerger,
 	pullUpdater *PullUpdater,
 	dbUpdater *DBUpdater,
@@ -48,7 +53,7 @@ type ApplyCommandRunner struct {
 	vcsClient           vcs.Client
 	commitStatusUpdater CommitStatusUpdater
 	prjCmdBuilder       ProjectApplyCommandBuilder
-	prjCmdRunner        ProjectApplyCommandRunner
+	prjCmdRunner        ProjectCommandRunnerForApply
 	autoMerger          *AutoMerger
 	pullUpdater         *PullUpdater
 	dbUpdater           *DBUpdater
@@ -158,6 +163,13 @@ func (a *ApplyCommandRunner) Run(ctx *command.Context, cmd *CommentCommand) {
 		return
 	}
 
+	if len(projectCmds) > 0 && projectCmds[0].LockAllProjectsBeforeExec {
+		ctx.Log.Debug("locking all projects before running apply")
+		if !a.lockAllProjects(ctx, cmd, projectCmds) {
+			return
+		}
+	}
+
 	// Only run commands in parallel if enabled
 	var result command.Result
 	if a.isParallelEnabled(projectCmds) {
@@ -224,6 +236,30 @@ func (a *ApplyCommandRunner) updateCommitStatus(ctx *command.Context, pullStatus
 	}
 }
 
+func (a *ApplyCommandRunner) lockAllProjects(ctx *command.Context, cmd PullCommand, projectCmds []command.ProjectContext) bool {
+	result := runProjectCmds(projectCmds, a.prjCmdRunner.Lock)
+	if result.HasErrors() {
+		ctx.Log.Err("failed to lock all projects before running apply")
+
+		a.pullUpdater.updatePull(ctx, cmd, result)
+
+		if err := a.commitStatusUpdater.UpdateCombinedCount(
+			ctx.Log,
+			ctx.Pull.BaseRepo,
+			ctx.Pull,
+			models.FailedCommitStatus,
+			command.Apply,
+			0,
+			0,
+		); err != nil {
+			ctx.Log.Warn("unable to update commit status: %s", err)
+		}
+
+		return false
+	}
+	return true
+}
+
 // applyAllDisabledComment is posted when apply all commands (i.e. "atlantis apply")
 // are disabled and an apply all command is issued.
 var applyAllDisabledComment = "**Error:** Running `atlantis apply` without flags is disabled." +
diff --git a/server/events/command/name.go b/server/events/command/name.go
index cfc541ea2b..adb216abf1 100644
--- a/server/events/command/name.go
+++ b/server/events/command/name.go
@@ -30,6 +30,8 @@ const (
 	Import
 	// State is a command to run terraform state rm
 	State
+	// LockCmd is a command to acquire a project lock
+	LockCmd
 	// Adding more? Don't forget to update String() below
 )
 
@@ -74,6 +76,8 @@ func (c Name) String() string {
 		return "import"
 	case State:
 		return "state"
+	case LockCmd:
+		return "lock"
 	}
 	return ""
 }
@@ -149,6 +153,8 @@ func ParseCommandName(name string) (Name, error) {
 		return Import, nil
 	case "state":
 		return State, nil
+	case "lock":
+		return LockCmd, nil
 	}
 	return -1, fmt.Errorf("unknown command name: %s", name)
 }
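The apply runner gates on `projectCmds[0]` because the repo-level flag is copied into every context built for one pull, so the first element is representative — the same convention `isParallelEnabled` uses. A sketch of that gate-then-fan-out shape; `Ctx` and `Result` are simplified stand-ins for `command.ProjectContext` and `command.ProjectResult`, not the real API:

```go
package main

import (
	"errors"
	"fmt"
)

type Ctx struct {
	Name                      string
	LockAllProjectsBeforeExec bool
}

type Result struct{ Err error }

// runAll fans a command out over every project context in order.
func runAll(ctxs []Ctx, run func(Ctx) Result) []Result {
	out := make([]Result, 0, len(ctxs))
	for _, c := range ctxs {
		out = append(out, run(c))
	}
	return out
}

// execute locks everything up front when the flag is set, and only then
// runs the real command over the contexts.
func execute(ctxs []Ctx, lock, apply func(Ctx) Result) ([]Result, error) {
	if len(ctxs) > 0 && ctxs[0].LockAllProjectsBeforeExec {
		for _, r := range runAll(ctxs, lock) {
			if r.Err != nil {
				return nil, r.Err // abort before any apply has run
			}
		}
	}
	return runAll(ctxs, apply), nil
}

func main() {
	ctxs := []Ctx{{"project1", true}, {"project2", true}}
	lock := func(c Ctx) Result {
		if c.Name == "project2" {
			return Result{Err: errors.New("lock for project2 is held elsewhere")}
		}
		return Result{}
	}
	apply := func(Ctx) Result { return Result{} }
	_, err := execute(ctxs, lock, apply)
	fmt.Println(err) // lock failure surfaces before anything is applied
}
```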
diff --git a/server/events/command/project_context.go b/server/events/command/project_context.go
index 6e221ff4d1..b94645a484 100644
--- a/server/events/command/project_context.go
+++ b/server/events/command/project_context.go
@@ -126,6 +126,8 @@ type ProjectContext struct {
 	// Allows custom policy check tools outside of Conftest to run in checks
 	CustomPolicyCheck bool
 	SilencePRComments []string
	// LockAllProjectsBeforeExec controls locking behavior: if true, all projects are locked before any plan/apply runs
+	LockAllProjectsBeforeExec bool
 }
 
 // SetProjectScopeTags adds ProjectContext tags to a new returned scope.
diff --git a/server/events/command/project_result.go b/server/events/command/project_result.go
index 8f72f1d168..4d249a6369 100644
--- a/server/events/command/project_result.go
+++ b/server/events/command/project_result.go
@@ -18,6 +18,7 @@ type ProjectResult struct {
 	VersionSuccess     string
 	ImportSuccess      *models.ImportSuccess
 	StateRmSuccess     *models.StateRmSuccess
+	LockSuccess        *bool
 	ProjectName        string
 	SilencePRComments  []string
 }
@@ -83,5 +84,8 @@ func (p ProjectResult) PlanStatus() models.ProjectPlanStatus {
 
 // IsSuccessful returns true if this project result had no errors.
 func (p ProjectResult) IsSuccessful() bool {
-	return p.PlanSuccess != nil || (p.PolicyCheckResults != nil && p.Error == nil && p.Failure == "") || p.ApplySuccess != ""
+	return p.PlanSuccess != nil ||
+		(p.PolicyCheckResults != nil && p.Error == nil && p.Failure == "") ||
+		p.ApplySuccess != "" ||
+		(p.LockSuccess != nil && *p.LockSuccess)
 }
diff --git a/server/events/command/project_result_test.go b/server/events/command/project_result_test.go
index dad68c2e54..43f9192efd 100644
--- a/server/events/command/project_result_test.go
+++ b/server/events/command/project_result_test.go
@@ -10,6 +10,7 @@ import (
 )
 
 func TestProjectResult_IsSuccessful(t *testing.T) {
+	trueVal := true
 	cases := map[string]struct {
 		pr  command.ProjectResult
 		exp bool
@@ -32,6 +33,12 @@ func TestProjectResult_IsSuccessful(t *testing.T) {
 			},
 			true,
 		},
+		"lock success": {
+			command.ProjectResult{
+				LockSuccess: &trueVal,
+			},
+			true,
+		},
 		"failure": {
 			command.ProjectResult{
 				Failure: "failure",
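`LockSuccess` is a `*bool` rather than a plain `bool` so that a nil pointer ("this result is not from a lock command") stays distinguishable from an explicit false ("the lock command ran and did not acquire the lock"). A runnable sketch of the three states; `Result` is a simplified stand-in for `command.ProjectResult`:

```go
package main

import "fmt"

type Result struct{ LockSuccess *bool }

// describe spells out the tri-state encoding of a *bool field.
func describe(r Result) string {
	switch {
	case r.LockSuccess == nil:
		return "not a lock result" // field never set
	case *r.LockSuccess:
		return "lock acquired" // counts as successful
	default:
		return "lock was not acquired" // set, but false
	}
}

func main() {
	yes, no := true, false
	fmt.Println(describe(Result{}))                  // not a lock result
	fmt.Println(describe(Result{LockSuccess: &yes})) // lock acquired
	fmt.Println(describe(Result{LockSuccess: &no}))  // lock was not acquired
}
```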
diff --git a/server/events/instrumented_project_command_runner.go b/server/events/instrumented_project_command_runner.go
index fa27a9c244..7b95ade92c 100644
--- a/server/events/instrumented_project_command_runner.go
+++ b/server/events/instrumented_project_command_runner.go
@@ -13,6 +13,7 @@ type IntrumentedCommandRunner interface {
 	ApprovePolicies(ctx command.ProjectContext) command.ProjectResult
 	Import(ctx command.ProjectContext) command.ProjectResult
 	StateRm(ctx command.ProjectContext) command.ProjectResult
+	Lock(ctx command.ProjectContext) command.ProjectResult
 }
 
 type InstrumentedProjectCommandRunner struct {
@@ -58,6 +59,10 @@ func (p *InstrumentedProjectCommandRunner) StateRm(ctx command.ProjectContext) c
 	return RunAndEmitStats(ctx, p.projectCommandRunner.StateRm, p.scope)
 }
 
+func (p *InstrumentedProjectCommandRunner) Lock(ctx command.ProjectContext) command.ProjectResult {
+	return RunAndEmitStats(ctx, p.projectCommandRunner.Lock, p.scope)
+}
+
 func RunAndEmitStats(ctx command.ProjectContext, execute func(ctx command.ProjectContext) command.ProjectResult, scope tally.Scope) command.ProjectResult {
 	commandName := ctx.CommandName.String()
 	// ensures we are differentiating between project level command and overall command
diff --git a/server/events/markdown_renderer.go b/server/events/markdown_renderer.go
index 5bbfc8a47e..9982e95e9c 100644
--- a/server/events/markdown_renderer.go
+++ b/server/events/markdown_renderer.go
@@ -315,6 +315,10 @@ func (m *MarkdownRenderer) renderProjectResults(ctx *command.Context, results []
 		} else {
 			resultData.Rendered = m.renderTemplateTrimSpace(templates.Lookup("stateRmSuccessUnwrapped"), result.StateRmSuccess)
 		}
+	} else if result.LockSuccess != nil {
+		if *result.LockSuccess {
+			continue // ignore successful locks; lock failures carry an Error/Failure and are rendered below
+		}
 	// Error out if no template was found, only if there are no errors or failures.
 	// This is because some errors and failures rely on additional context rendered by templtes, but not all errors or failures.
 	} else if !(result.Error != nil || result.Failure != "") {
diff --git a/server/events/markdown_renderer_test.go b/server/events/markdown_renderer_test.go
index ace23c443a..1819a94397 100644
--- a/server/events/markdown_renderer_test.go
+++ b/server/events/markdown_renderer_test.go
@@ -58,6 +58,12 @@ func TestRenderErr(t *testing.T) {
 			fmt.Errorf("some conftest error"),
 			"**Policy Check Error**\n```\nsome conftest error\n```",
 		},
+		{
+			"lock error",
+			command.LockCmd,
+			err,
+			"**Lock Error**\n```\nerr\n```",
+		},
 	}
 
 	r := events.NewMarkdownRenderer(false, false, false, false, false, false, "", "atlantis", false)
@@ -122,6 +128,12 @@ func TestRenderFailure(t *testing.T) {
 			"failure",
 			"**Policy Check Failed**: failure",
 		},
+		{
+			"lock failure",
+			command.LockCmd,
+			"failure",
+			"**Lock Failed**: failure",
+		},
 	}
 
 	r := events.NewMarkdownRenderer(false, false, false, false, false, false, "", "atlantis", false)
diff --git a/server/events/mocks/mock_project_command_runner.go b/server/events/mocks/mock_project_command_runner.go
index be42228cd1..ee7236d6ff 100644
--- a/server/events/mocks/mock_project_command_runner.go
+++ b/server/events/mocks/mock_project_command_runner.go
@@ -70,6 +70,21 @@ func (mock *MockProjectCommandRunner) Import(ctx command.ProjectContext) command
 	return ret0
 }
 
+func (mock *MockProjectCommandRunner) Lock(ctx command.ProjectContext) command.ProjectResult {
+	if mock == nil {
+		panic("mock must not be nil. Use myMock := NewMockProjectCommandRunner().")
+	}
+	params := []pegomock.Param{ctx}
+	result := pegomock.GetGenericMockFrom(mock).Invoke("Lock", params, []reflect.Type{reflect.TypeOf((*command.ProjectResult)(nil)).Elem()})
+	var ret0 command.ProjectResult
+	if len(result) != 0 {
+		if result[0] != nil {
+			ret0 = result[0].(command.ProjectResult)
+		}
+	}
+	return ret0
+}
+
 func (mock *MockProjectCommandRunner) Plan(ctx command.ProjectContext) command.ProjectResult {
 	if mock == nil {
 		panic("mock must not be nil.
Use myMock := NewMockProjectCommandRunner().") @@ -248,6 +263,33 @@ func (c *MockProjectCommandRunner_Import_OngoingVerification) GetAllCapturedArgu return } +func (verifier *VerifierMockProjectCommandRunner) Lock(ctx command.ProjectContext) *MockProjectCommandRunner_Lock_OngoingVerification { + params := []pegomock.Param{ctx} + methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Lock", params, verifier.timeout) + return &MockProjectCommandRunner_Lock_OngoingVerification{mock: verifier.mock, methodInvocations: methodInvocations} +} + +type MockProjectCommandRunner_Lock_OngoingVerification struct { + mock *MockProjectCommandRunner + methodInvocations []pegomock.MethodInvocation +} + +func (c *MockProjectCommandRunner_Lock_OngoingVerification) GetCapturedArguments() command.ProjectContext { + ctx := c.GetAllCapturedArguments() + return ctx[len(ctx)-1] +} + +func (c *MockProjectCommandRunner_Lock_OngoingVerification) GetAllCapturedArguments() (_param0 []command.ProjectContext) { + params := pegomock.GetGenericMockFrom(c.mock).GetInvocationParams(c.methodInvocations) + if len(params) > 0 { + _param0 = make([]command.ProjectContext, len(c.methodInvocations)) + for u, param := range params[0] { + _param0[u] = param.(command.ProjectContext) + } + } + return +} + func (verifier *VerifierMockProjectCommandRunner) Plan(ctx command.ProjectContext) *MockProjectCommandRunner_Plan_OngoingVerification { params := []pegomock.Param{ctx} methodInvocations := pegomock.GetGenericMockFrom(verifier.mock).Verify(verifier.inOrderContext, verifier.invocationCountMatcher, "Plan", params, verifier.timeout) diff --git a/server/events/plan_command_runner.go b/server/events/plan_command_runner.go index c2b6b7a107..7ac669bf50 100644 --- a/server/events/plan_command_runner.go +++ b/server/events/plan_command_runner.go @@ -7,6 +7,11 @@ import ( "github.com/runatlantis/atlantis/server/events/vcs" ) +type ProjectCommandRunnerForPlan interface { + ProjectPlanCommandRunner + ProjectLockCommandRunner +} + func NewPlanCommandRunner( silenceVCSStatusNoPlans bool, silenceVCSStatusNoProjects bool, @@ -15,7 +20,7 @@ func NewPlanCommandRunner( workingDir WorkingDir, commitStatusUpdater CommitStatusUpdater, projectCommandBuilder ProjectPlanCommandBuilder, - projectCommandRunner ProjectPlanCommandRunner, + projectCommandRunner ProjectCommandRunnerForPlan, dbUpdater *DBUpdater, pullUpdater *PullUpdater, policyCheckCommandRunner *PolicyCheckCommandRunner, @@ -64,7 +69,7 @@ type PlanCommandRunner struct { pendingPlanFinder PendingPlanFinder workingDir WorkingDir prjCmdBuilder ProjectPlanCommandBuilder - prjCmdRunner ProjectPlanCommandRunner + prjCmdRunner ProjectCommandRunnerForPlan dbUpdater *DBUpdater pullUpdater *PullUpdater policyCheckCommandRunner *PolicyCheckCommandRunner @@ -127,6 +132,13 @@ func (p *PlanCommandRunner) runAutoplan(ctx *command.Context) { ctx.Log.Err("deleting locks: %s", err) } + if len(projectCmds) > 0 && projectCmds[0].LockAllProjectsBeforeExec { + ctx.Log.Debug("locking all projects before running plan") + if !p.lockAllProjects(ctx, AutoplanCommand{}, projectCmds, baseRepo.FullName, pull.Num) { + return + } + } + // Only run commands in parallel if enabled var result command.Result if p.isParallelEnabled(projectCmds) { @@ -252,6 +264,13 @@ func (p *PlanCommandRunner) run(ctx *command.Context, cmd *CommentCommand) { if err != nil { ctx.Log.Err("deleting locks: %s", err) } + + if len(projectCmds) > 0 && 
projectCmds[0].LockAllProjectsBeforeExec { + ctx.Log.Debug("locking all projects before running plan") + if !p.lockAllProjects(ctx, cmd, projectCmds, baseRepo.FullName, pull.Num) { + return + } + } } // Only run commands in parallel if enabled @@ -382,3 +401,45 @@ func (p *PlanCommandRunner) partitionProjectCmds( func (p *PlanCommandRunner) isParallelEnabled(projectCmds []command.ProjectContext) bool { return len(projectCmds) > 0 && projectCmds[0].ParallelPlanEnabled } + +func (p *PlanCommandRunner) lockAllProjects(ctx *command.Context, cmd PullCommand, projectCmds []command.ProjectContext, repoFullname string, pullNum int) bool { + var err error + result := runProjectCmds(projectCmds, p.prjCmdRunner.Lock) + if result.HasErrors() { + ctx.Log.Err("failed to lock all projects before running plan") + _, err = p.lockingLocker.UnlockByPull(repoFullname, pullNum) + if err != nil { + ctx.Log.Err("deleting locks: %s", err) + } + + p.pullUpdater.updatePull(ctx, cmd, result) + + if err := p.commitStatusUpdater.UpdateCombinedCount( + ctx.Log, + ctx.Pull.BaseRepo, + ctx.Pull, + models.FailedCommitStatus, + command.Plan, + 0, + 0, + ); err != nil { + ctx.Log.Warn("unable to update commit status: %s", err) + } + + if err := p.commitStatusUpdater.UpdateCombinedCount( + ctx.Log, + ctx.Pull.BaseRepo, + ctx.Pull, + models.FailedCommitStatus, + command.Apply, + 0, + 0, + ); err != nil { + ctx.Log.Warn("unable to update commit status: %s", err) + } + + return false + } + + return true +} diff --git a/server/events/project_command_builder.go b/server/events/project_command_builder.go index 538c71a73f..34bfe23260 100644 --- a/server/events/project_command_builder.go +++ b/server/events/project_command_builder.go @@ -33,6 +33,8 @@ const ( DefaultDeleteSourceBranchOnMerge = false // DefaultAbortOnExcecutionOrderFail being false is the default setting for abort on execution group failiures DefaultAbortOnExcecutionOrderFail = false + // DefaultLockAllProjectsBeforeExec being false is the default setting for locking all projects before plan/apply + DefaultLockAllProjectsBeforeExec = false ) func NewInstrumentedProjectCommandBuilder( @@ -440,6 +442,7 @@ func (p *DefaultProjectCommandBuilder) buildAllCommandsByCfg(ctx *command.Contex parallelApply := p.EnableParallelApply parallelPlan := p.EnableParallelPlan abortOnExcecutionOrderFail := DefaultAbortOnExcecutionOrderFail + lockAllProjectsBeforeExec := DefaultLockAllProjectsBeforeExec if hasRepoCfg { if repoCfg.Automerge != nil { automerge = *repoCfg.Automerge @@ -451,6 +454,7 @@ func (p *DefaultProjectCommandBuilder) buildAllCommandsByCfg(ctx *command.Contex parallelPlan = *repoCfg.ParallelPlan } abortOnExcecutionOrderFail = repoCfg.AbortOnExcecutionOrderFail + lockAllProjectsBeforeExec = repoCfg.LockAllProjectsBeforeExec } if len(repoCfg.Projects) > 0 { @@ -477,6 +481,7 @@ func (p *DefaultProjectCommandBuilder) buildAllCommandsByCfg(ctx *command.Contex parallelPlan, verbose, abortOnExcecutionOrderFail, + lockAllProjectsBeforeExec, p.TerraformExecutor, )...) } @@ -539,6 +544,7 @@ func (p *DefaultProjectCommandBuilder) buildAllCommandsByCfg(ctx *command.Contex parallelPlan, verbose, abortOnExcecutionOrderFail, + lockAllProjectsBeforeExec, p.TerraformExecutor, )...) 
} @@ -836,6 +842,7 @@ func (p *DefaultProjectCommandBuilder) buildProjectCommandCtx(ctx *command.Conte parallelApply := p.EnableParallelApply parallelPlan := p.EnableParallelPlan abortOnExcecutionOrderFail := DefaultAbortOnExcecutionOrderFail + lockAllProjectsBeforeExec := DefaultLockAllProjectsBeforeExec if repoCfgPtr != nil { if repoCfgPtr.Automerge != nil { automerge = *repoCfgPtr.Automerge @@ -847,6 +854,7 @@ func (p *DefaultProjectCommandBuilder) buildProjectCommandCtx(ctx *command.Conte parallelPlan = *repoCfgPtr.ParallelPlan } abortOnExcecutionOrderFail = repoCfgPtr.AbortOnExcecutionOrderFail + lockAllProjectsBeforeExec = repoCfgPtr.LockAllProjectsBeforeExec } if len(matchingProjects) > 0 { @@ -872,6 +880,7 @@ func (p *DefaultProjectCommandBuilder) buildProjectCommandCtx(ctx *command.Conte parallelPlan, verbose, abortOnExcecutionOrderFail, + lockAllProjectsBeforeExec, p.TerraformExecutor, )...) } @@ -896,6 +905,7 @@ func (p *DefaultProjectCommandBuilder) buildProjectCommandCtx(ctx *command.Conte parallelPlan, verbose, abortOnExcecutionOrderFail, + lockAllProjectsBeforeExec, p.TerraformExecutor, )...) } diff --git a/server/events/project_command_context_builder.go b/server/events/project_command_context_builder.go index 6a08b16471..e5f994be1f 100644 --- a/server/events/project_command_context_builder.go +++ b/server/events/project_command_context_builder.go @@ -38,7 +38,8 @@ type ProjectCommandContextBuilder interface { prjCfg valid.MergedProjectCfg, commentFlags []string, repoDir string, - automerge, parallelApply, parallelPlan, verbose, abortOnExcecutionOrderFail bool, terraformClient terraform.Client, + automerge, parallelApply, parallelPlan, verbose, abortOnExcecutionOrderFail, lockAllProjectsBeforeExec bool, + terraformClient terraform.Client, ) []command.ProjectContext } @@ -58,13 +59,14 @@ func (cb *CommandScopedStatsProjectCommandContextBuilder) BuildProjectContext( prjCfg valid.MergedProjectCfg, commentFlags []string, repoDir string, - automerge, parallelApply, parallelPlan, verbose, abortOnExcecutionOrderFail bool, + automerge, parallelApply, parallelPlan, verbose, abortOnExcecutionOrderFail, lockAllProjectsBeforeExec bool, terraformClient terraform.Client, ) (projectCmds []command.ProjectContext) { cb.ProjectCounter.Inc(1) cmds := cb.ProjectCommandContextBuilder.BuildProjectContext( - ctx, cmdName, subCmdName, prjCfg, commentFlags, repoDir, automerge, parallelApply, parallelPlan, verbose, abortOnExcecutionOrderFail, terraformClient, + ctx, cmdName, subCmdName, prjCfg, commentFlags, repoDir, automerge, parallelApply, parallelPlan, verbose, + abortOnExcecutionOrderFail, lockAllProjectsBeforeExec, terraformClient, ) projectCmds = []command.ProjectContext{} @@ -92,7 +94,7 @@ func (cb *DefaultProjectCommandContextBuilder) BuildProjectContext( prjCfg valid.MergedProjectCfg, commentFlags []string, repoDir string, - automerge, parallelApply, parallelPlan, verbose, abortOnExcecutionOrderFail bool, + automerge, parallelApply, parallelPlan, verbose, abortOnExcecutionOrderFail, lockAllProjectsBeforeExec bool, terraformClient terraform.Client, ) (projectCmds []command.ProjectContext) { ctx.Log.Debug("Building project command context for %s", cmdName) @@ -142,6 +144,7 @@ func (cb *DefaultProjectCommandContextBuilder) BuildProjectContext( parallelPlan, verbose, abortOnExcecutionOrderFail, + lockAllProjectsBeforeExec, ctx.Scope, ctx.PullRequestStatus, ctx.PullStatus, @@ -164,7 +167,7 @@ func (cb *PolicyCheckProjectCommandContextBuilder) BuildProjectContext( prjCfg valid.MergedProjectCfg, 
commentFlags []string, repoDir string, - automerge, parallelApply, parallelPlan, verbose, abortOnExcecutionOrderFail bool, + automerge, parallelApply, parallelPlan, verbose, abortOnExcecutionOrderFail, lockAllProjectsBeforeExec bool, terraformClient terraform.Client, ) (projectCmds []command.ProjectContext) { if prjCfg.PolicyCheck { @@ -192,6 +195,7 @@ func (cb *PolicyCheckProjectCommandContextBuilder) BuildProjectContext( parallelPlan, verbose, abortOnExcecutionOrderFail, + lockAllProjectsBeforeExec, terraformClient, ) @@ -214,6 +218,7 @@ func (cb *PolicyCheckProjectCommandContextBuilder) BuildProjectContext( parallelPlan, verbose, abortOnExcecutionOrderFail, + lockAllProjectsBeforeExec, ctx.Scope, ctx.PullRequestStatus, ctx.PullStatus, @@ -239,6 +244,7 @@ func newProjectCommandContext(ctx *command.Context, parallelPlanEnabled bool, verbose bool, abortOnExcecutionOrderFail bool, + lockAllProjectsBeforeExec bool, scope tally.Scope, pullReqStatus models.PullReqStatus, pullStatus *models.PullStatus, @@ -307,6 +313,7 @@ func newProjectCommandContext(ctx *command.Context, ExecutionOrderGroup: projCfg.ExecutionOrderGroup, AbortOnExcecutionOrderFail: abortOnExcecutionOrderFail, SilencePRComments: projCfg.SilencePRComments, + LockAllProjectsBeforeExec: lockAllProjectsBeforeExec, } } diff --git a/server/events/project_command_context_builder_test.go b/server/events/project_command_context_builder_test.go index c3d75e950c..57df88fd58 100644 --- a/server/events/project_command_context_builder_test.go +++ b/server/events/project_command_context_builder_test.go @@ -61,7 +61,7 @@ func TestProjectCommandContextBuilder_PullStatus(t *testing.T) { }, } - result := subject.BuildProjectContext(commandCtx, command.Plan, "", projCfg, []string{}, "some/dir", false, false, false, false, false, terraformClient) + result := subject.BuildProjectContext(commandCtx, command.Plan, "", projCfg, []string{}, "some/dir", false, false, false, false, false, false, terraformClient) assert.Equal(t, models.ErroredPolicyCheckStatus, result[0].ProjectPlanStatus) }) @@ -80,7 +80,7 @@ func TestProjectCommandContextBuilder_PullStatus(t *testing.T) { }, } - result := subject.BuildProjectContext(commandCtx, command.Plan, "", projCfg, []string{}, "some/dir", false, false, false, false, false, terraformClient) + result := subject.BuildProjectContext(commandCtx, command.Plan, "", projCfg, []string{}, "some/dir", false, false, false, false, false, false, terraformClient) assert.Equal(t, models.ErroredPolicyCheckStatus, result[0].ProjectPlanStatus) }) @@ -100,7 +100,7 @@ func TestProjectCommandContextBuilder_PullStatus(t *testing.T) { }, } - result := subject.BuildProjectContext(commandCtx, command.Plan, "", projCfg, []string{}, "some/dir", false, true, false, false, false, terraformClient) + result := subject.BuildProjectContext(commandCtx, command.Plan, "", projCfg, []string{}, "some/dir", false, true, false, false, false, false, terraformClient) assert.True(t, result[0].ParallelApplyEnabled) assert.False(t, result[0].ParallelPlanEnabled) @@ -121,7 +121,7 @@ func TestProjectCommandContextBuilder_PullStatus(t *testing.T) { }, } - result := subject.BuildProjectContext(commandCtx, command.Plan, "", projCfg, []string{}, "some/dir", false, false, false, false, true, terraformClient) + result := subject.BuildProjectContext(commandCtx, command.Plan, "", projCfg, []string{}, "some/dir", false, false, false, false, true, false, terraformClient) assert.True(t, result[0].AbortOnExcecutionOrderFail) }) diff --git 
a/server/events/project_command_runner.go b/server/events/project_command_runner.go index e97d919820..3a8f3f022f 100644 --- a/server/events/project_command_runner.go +++ b/server/events/project_command_runner.go @@ -126,6 +126,11 @@ type ProjectStateCommandRunner interface { StateRm(ctx command.ProjectContext) command.ProjectResult } +type ProjectLockCommandRunner interface { + // Lock acquires a lock for the project described by ctx. + Lock(ctx command.ProjectContext) command.ProjectResult +} + // ProjectCommandRunner runs project commands. A project command is a command // for a specific TF project. type ProjectCommandRunner interface { @@ -136,6 +141,7 @@ type ProjectCommandRunner interface { ProjectVersionCommandRunner ProjectImportCommandRunner ProjectStateCommandRunner + ProjectLockCommandRunner } //go:generate pegomock generate --package mocks -o mocks/mock_job_url_setter.go JobURLSetter @@ -320,16 +326,28 @@ func (p *DefaultProjectCommandRunner) StateRm(ctx command.ProjectContext) comman } } +// Lock acquires a lock for the project described by ctx. +func (p *DefaultProjectCommandRunner) Lock(ctx command.ProjectContext) command.ProjectResult { + lockAttempt, failure, err := p.tryLock(ctx, ctx.RepoLocksMode != valid.RepoLocksDisabledMode) + res := lockAttempt != nil && lockAttempt.LockAcquired + return command.ProjectResult{ + Command: command.LockCmd, + LockSuccess: &res, + Error: err, + Failure: failure, + RepoRelDir: ctx.RepoRelDir, + Workspace: ctx.Workspace, + ProjectName: ctx.ProjectName, + SilencePRComments: ctx.SilencePRComments, + } +} + func (p *DefaultProjectCommandRunner) doApprovePolicies(ctx command.ProjectContext) (*models.PolicyCheckResults, string, error) { // Acquire Atlantis lock for this repo/dir/workspace. - lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.Pull.BaseRepo.FullName, ctx.RepoRelDir, ctx.ProjectName), ctx.RepoLocksMode == valid.RepoLocksOnPlanMode) - if err != nil { - return nil, "", errors.Wrap(err, "acquiring lock") - } - if !lockAttempt.LockAcquired { - return nil, lockAttempt.LockFailureReason, nil + lockAttempt, lockFailure, err := p.tryLock(ctx, ctx.RepoLocksMode == valid.RepoLocksOnPlanMode) + if lockAttempt == nil || !lockAttempt.LockAcquired { + return nil, lockFailure, err } - ctx.Log.Debug("acquired lock for project") // Acquire internal lock for the directory we're going to operate in. unlockFn, err := p.WorkingDirLocker.TryLock(ctx.Pull.BaseRepo.FullName, ctx.Pull.Num, ctx.Workspace, ctx.RepoRelDir) @@ -419,15 +437,11 @@ func (p *DefaultProjectCommandRunner) doPolicyCheck(ctx command.ProjectContext) // we will attempt to capture the lock here but fail to get the working directory // at which point we will unlock again to preserve functionality // If we fail to capture the lock here (super unlikely) then we error out and the user is forced to replan - lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.Pull.BaseRepo.FullName, ctx.RepoRelDir, ctx.ProjectName), ctx.RepoLocksMode == valid.RepoLocksOnPlanMode) - - if err != nil { - return nil, "", errors.Wrap(err, "acquiring lock") - } - if !lockAttempt.LockAcquired { - return nil, lockAttempt.LockFailureReason, nil + // Acquire Atlantis lock for this repo/dir/workspace. 
+ lockAttempt, lockFailure, err := p.tryLock(ctx, ctx.RepoLocksMode == valid.RepoLocksOnPlanMode) + if lockAttempt == nil || !lockAttempt.LockAcquired { + return nil, lockFailure, err } - ctx.Log.Debug("acquired lock for project.") // Acquire internal lock for the directory we're going to operate in. // We should refactor this to keep the lock for the duration of plan and policy check since as of now @@ -538,14 +552,10 @@ func (p *DefaultProjectCommandRunner) doPolicyCheck(ctx command.ProjectContext) func (p *DefaultProjectCommandRunner) doPlan(ctx command.ProjectContext) (*models.PlanSuccess, string, error) { // Acquire Atlantis lock for this repo/dir/workspace. - lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.Pull.BaseRepo.FullName, ctx.RepoRelDir, ctx.ProjectName), ctx.RepoLocksMode == valid.RepoLocksOnPlanMode) - if err != nil { - return nil, "", errors.Wrap(err, "acquiring lock") + lockAttempt, lockFailure, err := p.tryLock(ctx, ctx.RepoLocksMode == valid.RepoLocksOnPlanMode) + if lockAttempt == nil || !lockAttempt.LockAcquired { + return nil, lockFailure, err } - if !lockAttempt.LockAcquired { - return nil, lockAttempt.LockFailureReason, nil - } - ctx.Log.Debug("acquired lock for project") // Acquire internal lock for the directory we're going to operate in. unlockFn, err := p.WorkingDirLocker.TryLock(ctx.Pull.BaseRepo.FullName, ctx.Pull.Num, ctx.Workspace, ctx.RepoRelDir) @@ -615,14 +625,10 @@ func (p *DefaultProjectCommandRunner) doApply(ctx command.ProjectContext) (apply } // Acquire Atlantis lock for this repo/dir/workspace. - lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.Pull.BaseRepo.FullName, ctx.RepoRelDir, ctx.ProjectName), ctx.RepoLocksMode == valid.RepoLocksOnApplyMode) - if err != nil { - return "", "", errors.Wrap(err, "acquiring lock") - } - if !lockAttempt.LockAcquired { - return "", lockAttempt.LockFailureReason, nil + lockAttempt, lockFailure, err := p.tryLock(ctx, ctx.RepoLocksMode == valid.RepoLocksOnApplyMode) + if lockAttempt == nil || !lockAttempt.LockAcquired { + return "", lockFailure, err } - ctx.Log.Debug("acquired lock for project") // Acquire internal lock for the directory we're going to operate in. unlockFn, err := p.WorkingDirLocker.TryLock(ctx.Pull.BaseRepo.FullName, ctx.Pull.Num, ctx.Workspace, ctx.RepoRelDir) @@ -694,14 +700,10 @@ func (p *DefaultProjectCommandRunner) doImport(ctx command.ProjectContext) (out } // Acquire Atlantis lock for this repo/dir/workspace. - lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.Pull.BaseRepo.FullName, ctx.RepoRelDir, ctx.ProjectName), ctx.RepoLocksMode != valid.RepoLocksDisabledMode) - if err != nil { - return nil, "", errors.Wrap(err, "acquiring lock") + lockAttempt, lockFailure, err := p.tryLock(ctx, ctx.RepoLocksMode != valid.RepoLocksDisabledMode) + if lockAttempt == nil || !lockAttempt.LockAcquired { + return nil, lockFailure, err } - if !lockAttempt.LockAcquired { - return nil, lockAttempt.LockFailureReason, nil - } - ctx.Log.Debug("acquired lock for project") // Acquire internal lock for the directory we're going to operate in. unlockFn, err := p.WorkingDirLocker.TryLock(ctx.Pull.BaseRepo.FullName, ctx.Pull.Num, ctx.Workspace, ctx.RepoRelDir) @@ -735,14 +737,10 @@ func (p *DefaultProjectCommandRunner) doStateRm(ctx command.ProjectContext) (out } // Acquire Atlantis lock for this repo/dir/workspace. 
- lockAttempt, err := p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.Pull.BaseRepo.FullName, ctx.RepoRelDir, ctx.ProjectName), ctx.RepoLocksMode != valid.RepoLocksDisabledMode) - if err != nil { - return nil, "", errors.Wrap(err, "acquiring lock") - } - if !lockAttempt.LockAcquired { - return nil, lockAttempt.LockFailureReason, nil + lockAttempt, lockFailure, err := p.tryLock(ctx, ctx.RepoLocksMode != valid.RepoLocksDisabledMode) + if lockAttempt == nil || !lockAttempt.LockAcquired { + return nil, lockFailure, err } - ctx.Log.Debug("acquired lock for project") // Acquire internal lock for the directory we're going to operate in. unlockFn, err := p.WorkingDirLocker.TryLock(ctx.Pull.BaseRepo.FullName, ctx.Pull.Num, ctx.Workspace, ctx.RepoRelDir) @@ -809,3 +807,15 @@ func (p *DefaultProjectCommandRunner) runSteps(steps []valid.Step, ctx command.P } return outputs, nil } + +func (p *DefaultProjectCommandRunner) tryLock(ctx command.ProjectContext, repoLocking bool) (lockAttempt *TryLockResponse, failure string, err error) { + lockAttempt, err = p.Locker.TryLock(ctx.Log, ctx.Pull, ctx.User, ctx.Workspace, models.NewProject(ctx.Pull.BaseRepo.FullName, ctx.RepoRelDir, ctx.ProjectName), repoLocking) + if err != nil { + return lockAttempt, "", errors.Wrap(err, "acquiring lock") + } + if !lockAttempt.LockAcquired { + return lockAttempt, lockAttempt.LockFailureReason, nil + } + ctx.Log.Debug("acquired lock for project") + return lockAttempt, "", nil +}
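The `tryLock` helper above consolidates five near-identical lock-acquisition blocks into one function with a `(attempt, failureReason, error)` contract: a non-nil error means an infrastructure problem, a non-empty failure reason means the lock is legitimately held elsewhere, and `LockAcquired` set on the attempt is the success case. A sketch of the call-site shape this enables; `Attempt` is a simplified stand-in for the `TryLockResponse` type used in the real code:

```go
package main

import "fmt"

type Attempt struct {
	LockAcquired      bool
	LockFailureReason string
}

// tryLock mirrors the helper's contract: exactly one of the three
// outcomes (error, failure reason, acquired attempt) matters to callers.
func tryLock(acquire func() (*Attempt, error)) (*Attempt, string, error) {
	attempt, err := acquire()
	if err != nil {
		return attempt, "", fmt.Errorf("acquiring lock: %w", err)
	}
	if !attempt.LockAcquired {
		return attempt, attempt.LockFailureReason, nil
	}
	return attempt, "", nil
}

func main() {
	// Every former call site collapses to a single guard like this one.
	attempt, failure, err := tryLock(func() (*Attempt, error) {
		return &Attempt{LockFailureReason: "lock is held by another pull request"}, nil
	})
	if err != nil || attempt == nil || !attempt.LockAcquired {
		fmt.Println("not acquired:", failure) // not acquired: lock is held by another pull request
	}
}
```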